code
stringlengths
87
55.2k
code_codestyle
int64
0
349
style_context
stringlengths
135
49.1k
style_context_codestyle
int64
0
349
label
int64
0
1
from __future__ import annotations from decimal import Decimal from numpy import array def lowercase_ ( _lowerCamelCase : list[list[float]]): lowercase__ : Tuple = Decimal # Check if the provided matrix has 2 rows and 2 columns # since this implementation only works for 2x2 matrices if len(_lowerCamelCase) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2: # Calculate the determinant of the matrix lowercase__ : str = float( d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])) if determinant == 0: raise ValueError("This matrix has no inverse.") # Creates a copy of the matrix with swapped positions of the elements lowercase__ : Dict = [[0.0, 0.0], [0.0, 0.0]] lowercase__ , lowercase__ : Union[str, Any] = matrix[1][1], matrix[0][0] lowercase__ , lowercase__ : int = -matrix[1][0], -matrix[0][1] # Calculate the inverse of the matrix return [ [(float(d(_lowerCamelCase)) / determinant) or 0.0 for n in row] for row in swapped_matrix ] elif ( len(_lowerCamelCase) == 3 and len(matrix[0]) == 3 and len(matrix[1]) == 3 and len(matrix[2]) == 3 ): # Calculate the determinant of the matrix using Sarrus rule lowercase__ : Any = float( ( (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2])) + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0])) + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1])) ) - ( (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0])) + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2])) + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1])) )) if determinant == 0: raise ValueError("This matrix has no inverse.") # Creating cofactor matrix lowercase__ : Optional[Any] = [ [d(0.0), d(0.0), d(0.0)], [d(0.0), d(0.0), d(0.0)], [d(0.0), d(0.0), d(0.0)], ] lowercase__ : Optional[int] = (d(matrix[1][1]) * d(matrix[2][2])) - ( d(matrix[1][2]) * d(matrix[2][1]) ) lowercase__ : List[Any] = -( (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0])) ) lowercase__ : List[Any] = (d(matrix[1][0]) * d(matrix[2][1])) - ( 
d(matrix[1][1]) * d(matrix[2][0]) ) lowercase__ : Optional[Any] = -( (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1])) ) lowercase__ : Optional[int] = (d(matrix[0][0]) * d(matrix[2][2])) - ( d(matrix[0][2]) * d(matrix[2][0]) ) lowercase__ : int = -( (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0])) ) lowercase__ : int = (d(matrix[0][1]) * d(matrix[1][2])) - ( d(matrix[0][2]) * d(matrix[1][1]) ) lowercase__ : Any = -( (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0])) ) lowercase__ : List[Any] = (d(matrix[0][0]) * d(matrix[1][1])) - ( d(matrix[0][1]) * d(matrix[1][0]) ) # Transpose the cofactor matrix (Adjoint matrix) lowercase__ : List[Any] = array(_lowerCamelCase) for i in range(3): for j in range(3): lowercase__ : int = cofactor_matrix[j][i] # Inverse of the matrix using the formula (1/determinant) * adjoint matrix lowercase__ : List[Any] = array(_lowerCamelCase) for i in range(3): for j in range(3): inverse_matrix[i][j] /= d(_lowerCamelCase) # Calculate the inverse of the matrix return [[float(d(_lowerCamelCase)) or 0.0 for n in row] for row in inverse_matrix] raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
87
import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = R''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs (`Dict[str, Any]`, *optional*): Additional stopping criteria specific kwargs. Return: `bool`. `False` indicates we should continue, `True` indicates we should stop. ''' class snake_case_ ( __A ): @add_start_docstrings(lowercase_ ) def __call__( self : Optional[Any] , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : List[str] ) -> bool: raise NotImplementedError("StoppingCriteria needs to be subclassed" ) class snake_case_ ( __A ): def __init__( self : Dict , lowercase_ : int , lowercase_ : Optional[int] = None ) -> List[str]: lowercase__ : str = max_length lowercase__ : Optional[int] = max_position_embeddings @add_start_docstrings(lowercase_ ) def __call__( self : Tuple , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Union[str, Any] ) -> bool: lowercase__ : str = input_ids.shape[-1] lowercase__ : Any = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " F'''maximum length ({self.max_position_embeddings}). 
Depending on the model, you may observe ''' "exceptions, performance degradation, or nothing at all." ) return is_done class snake_case_ ( __A ): def __init__( self : Tuple , lowercase_ : int , lowercase_ : int ) -> List[str]: warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. " F'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` ''' "with `max_length = start_length + max_new_tokens` instead." , lowercase_ , ) lowercase__ : Optional[int] = start_length lowercase__ : str = max_new_tokens lowercase__ : Tuple = start_length + max_new_tokens @add_start_docstrings(lowercase_ ) def __call__( self : List[Any] , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Dict ) -> bool: return input_ids.shape[-1] >= self.max_length class snake_case_ ( __A ): def __init__( self : Tuple , lowercase_ : float , lowercase_ : Optional[float] = None ) -> Dict: lowercase__ : List[str] = max_time lowercase__ : Tuple = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(lowercase_ ) def __call__( self : int , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Union[str, Any] ) -> bool: return time.time() - self.initial_timestamp > self.max_time class snake_case_ ( __A ): @add_start_docstrings(lowercase_ ) def __call__( self : str , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : List[str] ) -> bool: return any(criteria(lowercase_ , lowercase_ ) for criteria in self ) @property def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]: for stopping_criterium in self: if isinstance(lowercase_ , lowercase_ ): return stopping_criterium.max_length elif isinstance(lowercase_ , lowercase_ ): return stopping_criterium.max_length return None def lowercase_ ( _lowerCamelCase : StoppingCriteriaList , _lowerCamelCase : int): lowercase__ : Optional[int] = stopping_criteria.max_length lowercase__ : str = deepcopy(_lowerCamelCase) if 
stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , _lowerCamelCase) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCamelCase)) return new_stopping_criteria
87
1
import inspect import unittest from transformers import YolosConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import YolosForObjectDetection, YolosModel from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case_ : def __init__( self : Union[str, Any] , lowercase_ : str , lowercase_ : int=13 , lowercase_ : Union[str, Any]=[30, 30] , lowercase_ : List[str]=2 , lowercase_ : str=3 , lowercase_ : Dict=True , lowercase_ : str=True , lowercase_ : List[str]=32 , lowercase_ : str=5 , lowercase_ : Tuple=4 , lowercase_ : Any=37 , lowercase_ : Any="gelu" , lowercase_ : Optional[Any]=0.1 , lowercase_ : str=0.1 , lowercase_ : Any=10 , lowercase_ : List[str]=0.02 , lowercase_ : Union[str, Any]=3 , lowercase_ : str=None , lowercase_ : List[str]=8 , lowercase_ : Any=10 , ) -> Dict: lowercase__ : Any = parent lowercase__ : Union[str, Any] = batch_size lowercase__ : Tuple = image_size lowercase__ : List[Any] = patch_size lowercase__ : int = num_channels lowercase__ : str = is_training lowercase__ : Any = use_labels lowercase__ : str = hidden_size lowercase__ : Any = num_hidden_layers lowercase__ : Optional[Any] = num_attention_heads lowercase__ : Optional[Any] = intermediate_size lowercase__ : List[str] = hidden_act lowercase__ : Any = hidden_dropout_prob lowercase__ : List[Any] = attention_probs_dropout_prob lowercase__ : Tuple = type_sequence_label_size lowercase__ : int = initializer_range lowercase__ : List[Any] = num_labels lowercase__ : Union[str, Any] = 
scope lowercase__ : Tuple = n_targets lowercase__ : List[Any] = num_detection_tokens # we set the expected sequence length (which is used in several tests) # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens lowercase__ : List[Any] = (image_size[1] // patch_size) * (image_size[0] // patch_size) lowercase__ : Tuple = num_patches + 1 + self.num_detection_tokens def __UpperCamelCase ( self : Dict ) -> Tuple: lowercase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] ) lowercase__ : Optional[Any] = None if self.use_labels: # labels is a list of Dict (each Dict being the labels for a given example in the batch) lowercase__ : Dict = [] for i in range(self.batch_size ): lowercase__ : List[str] = {} lowercase__ : int = torch.randint( high=self.num_labels , size=(self.n_targets,) , device=lowercase_ ) lowercase__ : int = torch.rand(self.n_targets , 4 , device=lowercase_ ) labels.append(lowercase_ ) lowercase__ : Optional[Any] = self.get_config() return config, pixel_values, labels def __UpperCamelCase ( self : str ) -> List[str]: return YolosConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , ) def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str] , lowercase_ : Optional[int] ) -> Dict: lowercase__ : Optional[Any] = YolosModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase__ : Optional[Any] = model(lowercase_ ) 
self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) ) def __UpperCamelCase ( self : List[str] , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : Union[str, Any] ) -> str: lowercase__ : Optional[Any] = YolosForObjectDetection(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase__ : str = model(pixel_values=lowercase_ ) lowercase__ : str = model(lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) lowercase__ : Union[str, Any] = model(pixel_values=lowercase_ , labels=lowercase_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) ) self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) ) def __UpperCamelCase ( self : Tuple ) -> Dict: lowercase__ : Dict = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : Tuple = config_and_inputs lowercase__ : Optional[Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class snake_case_ ( __A ,__A ,unittest.TestCase ): __A : Optional[Any] = (YolosModel, YolosForObjectDetection) if is_torch_available() else () __A : Union[str, Any] = ( {"feature-extraction": YolosModel, "object-detection": YolosForObjectDetection} if is_torch_available() else {} ) __A : Optional[Any] = False __A : Tuple = False __A : str = False __A : Optional[Any] = False def __UpperCamelCase ( self : List[str] , lowercase_ : Any , lowercase_ : str , lowercase_ : List[str]=False ) -> str: lowercase__ : Tuple = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ ) if return_labels: if model_class.__name__ == "YolosForObjectDetection": lowercase__ : int = [] for i in 
range(self.model_tester.batch_size ): lowercase__ : Union[str, Any] = {} lowercase__ : int = torch.ones( size=(self.model_tester.n_targets,) , device=lowercase_ , dtype=torch.long ) lowercase__ : List[str] = torch.ones( self.model_tester.n_targets , 4 , device=lowercase_ , dtype=torch.float ) labels.append(lowercase_ ) lowercase__ : Any = labels return inputs_dict def __UpperCamelCase ( self : str ) -> List[Any]: lowercase__ : Optional[Any] = YolosModelTester(self ) lowercase__ : int = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 ) def __UpperCamelCase ( self : Optional[int] ) -> str: self.config_tester.run_common_tests() def __UpperCamelCase ( self : Tuple ) -> Any: # YOLOS does not use inputs_embeds pass def __UpperCamelCase ( self : str ) -> int: lowercase__ , lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : int = model_class(lowercase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__ : Optional[int] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) ) def __UpperCamelCase ( self : Optional[Any] ) -> Dict: lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : List[Any] = model_class(lowercase_ ) lowercase__ : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Dict = [*signature.parameters.keys()] lowercase__ : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowercase_ ) def __UpperCamelCase ( self : List[str] ) -> Dict: lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def __UpperCamelCase ( self : Any ) -> Any: lowercase__ , lowercase__ : List[Any] = 
self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : List[str] = True # in YOLOS, the seq_len is different lowercase__ : Dict = self.model_tester.expected_seq_len for model_class in self.all_model_classes: lowercase__ : Tuple = True lowercase__ : int = False lowercase__ : List[str] = True lowercase__ : List[str] = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase__ : Union[str, Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase__ : Optional[Any] = outputs.attentions self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase__ : List[str] = True lowercase__ : List[str] = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase__ : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase__ : Optional[Any] = outputs.attentions self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) lowercase__ : int = len(lowercase_ ) # Check attention is always last and order is fine lowercase__ : Optional[Any] = True lowercase__ : Any = True lowercase__ : Dict = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase__ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase__ : int = 1 self.assertEqual(out_len + added_hidden_states , len(lowercase_ ) ) lowercase__ : List[Any] = outputs.attentions self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def __UpperCamelCase ( self : List[Any] ) -> List[str]: def check_hidden_states_output(lowercase_ : Dict , lowercase_ : str , lowercase_ : 
Optional[int] ): lowercase__ : int = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase__ : List[Any] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase__ : Tuple = outputs.hidden_states lowercase__ : Union[str, Any] = getattr( self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 ) self.assertEqual(len(lowercase_ ) , lowercase_ ) # YOLOS has a different seq_length lowercase__ : str = self.model_tester.expected_seq_len self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowercase__ , lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Optional[Any] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : List[str] = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) def __UpperCamelCase ( self : Optional[int] ) -> List[Any]: lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_object_detection(*lowercase_ ) @slow def __UpperCamelCase ( self : List[str] ) -> Any: for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : str = YolosModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def lowercase_ ( ): lowercase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png") return image @require_torch @require_vision class snake_case_ ( unittest.TestCase ): @cached_property def __UpperCamelCase ( self : Any ) -> Any: return AutoImageProcessor.from_pretrained("hustvl/yolos-small" ) if is_vision_available() else None @slow def __UpperCamelCase ( self : Optional[Any] ) -> int: lowercase__ : Tuple = YolosForObjectDetection.from_pretrained("hustvl/yolos-small" ).to(lowercase_ ) 
lowercase__ : List[Any] = self.default_image_processor lowercase__ : Dict = prepare_img() lowercase__ : str = image_processor(images=lowercase_ , return_tensors="pt" ).to(lowercase_ ) # forward pass with torch.no_grad(): lowercase__ : str = model(inputs.pixel_values ) # verify outputs lowercase__ : Dict = torch.Size((1, 1_00, 92) ) self.assertEqual(outputs.logits.shape , lowercase_ ) lowercase__ : Optional[Any] = torch.tensor( [[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] , device=lowercase_ , ) lowercase__ : int = torch.tensor( [[0.25_59, 0.54_55, 0.47_06], [0.29_89, 0.72_79, 0.18_75], [0.77_32, 0.40_17, 0.44_62]] , device=lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowercase_ , atol=1E-4 ) ) self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , lowercase_ , atol=1E-4 ) ) # verify postprocessing lowercase__ : int = image_processor.post_process_object_detection( lowercase_ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0] lowercase__ : Any = torch.tensor([0.99_94, 0.97_90, 0.99_64, 0.99_72, 0.98_61] ).to(lowercase_ ) lowercase__ : Union[str, Any] = [75, 75, 17, 63, 17] lowercase__ : Optional[int] = torch.tensor([3_35.06_09, 79.38_48, 3_75.42_16, 1_87.24_95] ).to(lowercase_ ) self.assertEqual(len(results["scores"] ) , 5 ) self.assertTrue(torch.allclose(results["scores"] , lowercase_ , atol=1E-4 ) ) self.assertSequenceEqual(results["labels"].tolist() , lowercase_ ) self.assertTrue(torch.allclose(results["boxes"][0, :] , lowercase_ ) )
87
from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def lowercase_ ( _lowerCamelCase : Dict[str, torch.Tensor]): lowercase__ : Any = [] lowercase__ : Optional[int] = [] lowercase__ : Tuple = [] for rt in rc.restypes: lowercase__ : Dict = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names]) lowercase__ : str = {name: i for i, name in enumerate(_lowerCamelCase)} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types]) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names]) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 14) restype_atomaa_to_atomaa_list.append([0] * 37) restype_atomaa_mask_list.append([0.0] * 14) lowercase__ : Union[str, Any] = torch.tensor( _lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , ) lowercase__ : str = torch.tensor( _lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , ) lowercase__ : List[str] = torch.tensor( _lowerCamelCase , dtype=torch.floataa , device=protein["aatype"].device , ) lowercase__ : str = protein["aatype"].to(torch.long) # create the mapping for (residx, atom14) --> atom37, i.e. 
an array # with shape (num_res, 14) containing the atom37 indices for this protein lowercase__ : Dict = restype_atomaa_to_atomaa[protein_aatype] lowercase__ : str = restype_atomaa_mask[protein_aatype] lowercase__ : List[Any] = residx_atomaa_mask lowercase__ : Optional[Any] = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back lowercase__ : str = restype_atomaa_to_atomaa[protein_aatype] lowercase__ : str = residx_atomaa_to_atomaa.long() # create the corresponding mask lowercase__ : Optional[Any] = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["aatype"].device) for restype, restype_letter in enumerate(rc.restypes): lowercase__ : Tuple = rc.restype_atoa[restype_letter] lowercase__ : List[Any] = rc.residue_atoms[restype_name] for atom_name in atom_names: lowercase__ : Optional[int] = rc.atom_order[atom_name] lowercase__ : Tuple = 1 lowercase__ : Dict = restype_atomaa_mask[protein_aatype] lowercase__ : Any = residx_atomaa_mask return protein def lowercase_ ( _lowerCamelCase : Dict[str, torch.Tensor]): lowercase__ : Tuple = tree_map(lambda _lowerCamelCase: torch.tensor(_lowerCamelCase , device=batch["aatype"].device) , _lowerCamelCase , np.ndarray) lowercase__ : List[str] = tensor_tree_map(lambda _lowerCamelCase: np.array(_lowerCamelCase) , make_atomaa_masks(_lowerCamelCase)) return out
87
1
from PIL import Image def lowercase_ ( _lowerCamelCase : Image , _lowerCamelCase : int): lowercase__ : List[str] = (259 * (level + 255)) / (255 * (259 - level)) def contrast(_lowerCamelCase : int) -> int: return int(128 + factor * (c - 128)) return img.point(_lowerCamelCase) if __name__ == "__main__": # Load image with Image.open('''image_data/lena.jpg''') as img: # Change contrast to 170 UpperCamelCase = change_contrast(img, 170) cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
87
import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class snake_case_ ( unittest.TestCase ): def __init__( self : Tuple , lowercase_ : List[Any] , lowercase_ : Union[str, Any]=2 , lowercase_ : Union[str, Any]=56 , lowercase_ : Tuple=True , lowercase_ : Optional[Any]=True , lowercase_ : Optional[Any]=True , lowercase_ : int=True , lowercase_ : Any=99 , lowercase_ : int=32 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=2 , lowercase_ : Dict=7 , lowercase_ : Dict="gelu_new" , lowercase_ : Tuple=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : Tuple=5_12 , lowercase_ : Optional[Any]=16 , lowercase_ : List[Any]=2 , lowercase_ : Dict=0.02 , lowercase_ : int=4 , lowercase_ : Tuple="block_sparse" , lowercase_ : Dict=True , lowercase_ : Optional[int]=False , lowercase_ : Dict=2 , lowercase_ : int=3 , ) -> Union[str, Any]: lowercase__ : Dict = parent lowercase__ : Dict = batch_size lowercase__ : Tuple = seq_length lowercase__ : Dict = is_training lowercase__ : Dict = use_attention_mask lowercase__ : Tuple = use_token_type_ids lowercase__ : Optional[int] = use_labels lowercase__ : List[Any] = vocab_size lowercase__ : Any = hidden_size lowercase__ : List[Any] = num_hidden_layers lowercase__ : Union[str, Any] = num_attention_heads lowercase__ : str = intermediate_size lowercase__ : int = hidden_act lowercase__ : str = hidden_dropout_prob lowercase__ : List[str] = attention_probs_dropout_prob lowercase__ : Optional[Any] = max_position_embeddings lowercase__ : Union[str, Any] 
= type_vocab_size lowercase__ : Dict = type_sequence_label_size lowercase__ : Any = initializer_range lowercase__ : List[str] = num_choices lowercase__ : str = rescale_embeddings lowercase__ : Optional[Any] = attention_type lowercase__ : Optional[int] = use_bias lowercase__ : Optional[int] = block_size lowercase__ : str = num_random_blocks def __UpperCamelCase ( self : str ) -> Optional[Any]: lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : str = None if self.use_attention_mask: lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Optional[int] = None if self.use_token_type_ids: lowercase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__ : int = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def __UpperCamelCase ( self : Union[str, Any] ) -> int: lowercase__ : int = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs lowercase__ : Union[str, Any] = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_flax class snake_case_ ( __A ,unittest.TestCase ): __A : Optional[int] = ( ( FlaxBigBirdForCausalLM, 
FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) __A : List[str] = False __A : Any = False def __UpperCamelCase ( self : List[str] ) -> List[Any]: lowercase__ : Union[str, Any] = FlaxBigBirdModelTester(self ) @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __UpperCamelCase ( self : Optional[int] ) -> Dict: super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __UpperCamelCase ( self : List[str] ) -> Any: super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __UpperCamelCase ( self : Tuple ) -> str: super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __UpperCamelCase ( self : Dict ) -> Union[str, Any]: super().test_hidden_states_output() @slow def __UpperCamelCase ( self : Optional[int] ) -> Tuple: for model_class_name in self.all_model_classes: lowercase__ : Optional[Any] = model_class_name.from_pretrained("google/bigbird-roberta-base" ) self.assertIsNotNone(lowercase_ ) def __UpperCamelCase ( self : int ) -> Optional[int]: if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __UpperCamelCase ( self : str ) -> Any: lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowercase__ : Union[str, Any] = self._prepare_for_class(lowercase_ , lowercase_ ) lowercase__ : Optional[Any] = model_class(lowercase_ ) @jax.jit def model_jitted(lowercase_ 
: Tuple , lowercase_ : int=None , **lowercase_ : Dict ): return model(input_ids=lowercase_ , attention_mask=lowercase_ , **lowercase_ ) with self.subTest("JIT Enabled" ): lowercase__ : int = model_jitted(**lowercase_ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): lowercase__ : Any = model_jitted(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) ) for jitted_output, output in zip(lowercase_ , lowercase_ ): self.assertEqual(jitted_output.shape , output.shape ) def __UpperCamelCase ( self : List[Any] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : List[Any]=1E-5 , lowercase_ : Any="outputs" , lowercase_ : List[str]=None ) -> List[Any]: # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version, # an effort was done to return `attention_probs` (yet to be verified). if name.startswith("outputs.attentions" ): return else: super().check_pt_flax_outputs(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
87
1
from math import factorial, radians


def lowercase_(angle_in_degrees: float, accuracy: int = 18, rounded_values_count: int = 10) -> float:
    """Approximate sin(angle_in_degrees) with a Maclaurin series.

    The angle is first wrapped into [0, 360) degrees, converted to radians,
    and then the alternating series  x - x^3/3! + x^5/5! - ...  is summed.

    Args:
        angle_in_degrees: Angle in degrees (any real value; wrapped mod 360).
        accuracy: Number of series terms added after the leading ``x`` term.
        rounded_values_count: Decimal places the result is rounded to.

    Returns:
        The rounded series approximation of sin(angle_in_degrees).

    NOTE(review): the original definition repeated the parameter name
    `_lowerCamelCase` three times (a SyntaxError) while the body read
    `angle_in_degrees`/`a`/`b`; parameters and locals are renamed here so the
    signature and body agree. Behavior (defaults 18 terms, 10 decimals) is kept.
    """
    # Wrap the angle into [0, 360) so large inputs stay in the series'
    # well-converging range.
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)

    # Convert to radians: the Maclaurin series is defined for radians.
    angle_in_radians = radians(angle_in_degrees)

    result = angle_in_radians  # leading term x
    a = 3   # exponent of the next term (x^3, x^5, ...)
    b = -1  # alternating sign of the next term

    for _ in range(accuracy):
        result += (b * (angle_in_radians**a)) / factorial(a)
        b = -b  # one positive term and the next will be negative and so on...
        a += 2  # increased by 2 for every term

    return round(result, rounded_values_count)


if __name__ == "__main__":
    __import__("doctest").testmod()
87
"""Lazy import structure for the GroupViT model (configuration + PyTorch/TF modelings)."""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


# Map of submodule name -> public names it provides; consumed by _LazyModule below.
# NOTE(review): the original assigned each name list to a repeatedly-clobbered
# throwaway variable and then passed an undefined `_import_structure` to
# _LazyModule (NameError at import time); the structure is rebuilt properly here.
_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

# PyTorch modeling classes are only exposed when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

# TensorFlow modeling classes are only exposed when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers and IDEs see the real imports.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so the heavy deps load on first
    # attribute access (standard transformers lazy-module pattern).
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
1
# CLIP-guided Stable Diffusion style-transfer pipeline.
#
# NOTE(review): this file appears machine-obfuscated — every assignment binds the
# throwaway name `lowercase__` while later statements read the original variable
# names (e.g. `image`, `va`, `timesteps`), and parameters are all named
# `lowercase_`/`_lowerCamelCase`. Code is kept byte-identical; comments describe
# the apparent intent of the underlying (un-obfuscated) pipeline and should be
# re-verified against the original source before relying on them.
import inspect
from typing import Optional, Union

import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    DPMSolverMultistepScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
    PIL_INTERPOLATION,
    randn_tensor,
)


# Preprocess an input image (tensor / PIL image / list thereof) into a
# [-1, 1]-scaled NCHW torch tensor, resizing PIL inputs to (w, h).
def lowercase_(_lowerCamelCase : List[Any] , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[Any]):
    if isinstance(_lowerCamelCase , torch.Tensor):
        # Already a tensor: assumed preprocessed — returned as-is.
        return image
    elif isinstance(_lowerCamelCase , PIL.Image.Image):
        lowercase__ : List[str] = [image]
    if isinstance(image[0] , PIL.Image.Image):
        # PIL path: resize, stack, scale to [0, 1], NHWC -> NCHW, then to [-1, 1].
        lowercase__ : Any = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        lowercase__ : Tuple = np.concatenate(_lowerCamelCase , axis=0)
        lowercase__ : Any = np.array(_lowerCamelCase).astype(np.floataa) / 255.0
        lowercase__ : List[Any] = image.transpose(0 , 3 , 1 , 2)
        lowercase__ : Tuple = 2.0 * image - 1.0
        lowercase__ : Optional[int] = torch.from_numpy(_lowerCamelCase)
    elif isinstance(image[0] , torch.Tensor):
        lowercase__ : Any = torch.cat(_lowerCamelCase , dim=0)
    return image


# Spherical linear interpolation (slerp) between two vectors/tensors; falls back
# to plain lerp when the vectors are nearly collinear (|dot| > DOT_THRESHOLD).
# Accepts torch tensors (moved to numpy and back) or numpy arrays.
def lowercase_(_lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : Optional[int] , _lowerCamelCase : str=0.9995):
    if not isinstance(_lowerCamelCase , np.ndarray):
        # Torch inputs: remember device, compute in numpy.
        lowercase__ : List[str] = True
        lowercase__ : Tuple = va.device
        lowercase__ : Union[str, Any] = va.cpu().numpy()
        lowercase__ : str = va.cpu().numpy()
    lowercase__ : Tuple = np.sum(va * va / (np.linalg.norm(_lowerCamelCase) * np.linalg.norm(_lowerCamelCase)))
    if np.abs(_lowerCamelCase) > DOT_THRESHOLD:
        # Nearly parallel: linear interpolation is numerically safer.
        lowercase__ : Any = (1 - t) * va + t * va
    else:
        # Standard slerp: sin-weighted combination along the great circle.
        lowercase__ : Any = np.arccos(_lowerCamelCase)
        lowercase__ : List[Any] = np.sin(_lowerCamelCase)
        lowercase__ : List[Any] = theta_a * t
        lowercase__ : Optional[Any] = np.sin(_lowerCamelCase)
        lowercase__ : List[Any] = np.sin(theta_a - theta_t) / sin_theta_a
        lowercase__ : Dict = sin_theta_t / sin_theta_a
        lowercase__ : List[Any] = sa * va + sa * va
    if inputs_are_torch:
        lowercase__ : Tuple = torch.from_numpy(_lowerCamelCase).to(_lowerCamelCase)
    return va


# Squared spherical (great-circle) distance between L2-normalized embeddings,
# used as the CLIP-guidance loss.
def lowercase_(_lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any]):
    lowercase__ : Any = F.normalize(_lowerCamelCase , dim=-1)
    lowercase__ : Optional[Any] = F.normalize(_lowerCamelCase , dim=-1)
    return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)


# Toggle requires_grad on every parameter of a model (freeze/unfreeze helper).
def lowercase_(_lowerCamelCase : int , _lowerCamelCase : int):
    for param in model.parameters():
        lowercase__ : str = value


class snake_case_(__A):
    # CLIP-guided style-transfer diffusion pipeline: combines a VAE, CLIP text/image
    # encoders, a UNet denoiser and an optional CoCa captioner.
    def __init__(self : str , lowercase_ : AutoencoderKL , lowercase_ : CLIPTextModel , lowercase_ : CLIPModel , lowercase_ : CLIPTokenizer , lowercase_ : UNetaDConditionModel , lowercase_ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , lowercase_ : CLIPFeatureExtractor , lowercase_ : Dict=None , lowercase_ : Tuple=None , lowercase_ : Tuple=None , ) -> Union[str, Any]:
        super().__init__()
        self.register_modules(
            vae=lowercase_ , text_encoder=lowercase_ , clip_model=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , feature_extractor=lowercase_ , coca_model=lowercase_ , coca_tokenizer=lowercase_ , coca_transform=lowercase_ , )
        # feature_extractor.size may be an int or a dict with "shortest_edge".
        lowercase__ : int = (
            feature_extractor.size
            if isinstance(feature_extractor.size , lowercase_ )
            else feature_extractor.size["shortest_edge"]
        )
        lowercase__ : Any = transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std )
        # Freeze the encoders used only for guidance.
        set_requires_grad(self.text_encoder , lowercase_ )
        set_requires_grad(self.clip_model , lowercase_ )

    # Enable sliced attention computation in the UNet to save memory.
    def __UpperCamelCase(self : Any , lowercase_ : Optional[Union[str, int]] = "auto" ) -> int:
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            lowercase__ : int = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(lowercase_ )

    # Disable attention slicing (delegates to enable with a None-like argument).
    def __UpperCamelCase(self : Optional[int] ) -> Tuple:
        self.enable_attention_slicing(lowercase_ )

    # Freeze the VAE.
    def __UpperCamelCase(self : Tuple ) -> int:
        set_requires_grad(self.vae , lowercase_ )

    # Unfreeze the VAE.
    def __UpperCamelCase(self : Tuple ) -> Optional[Any]:
        set_requires_grad(self.vae , lowercase_ )

    # Freeze the UNet.
    def __UpperCamelCase(self : Dict ) -> Optional[Any]:
        set_requires_grad(self.unet , lowercase_ )

    # Unfreeze the UNet.
    def __UpperCamelCase(self : List[Any] ) -> Optional[int]:
        set_requires_grad(self.unet , lowercase_ )

    # Compute the truncated timestep schedule for img2img given `strength`.
    def __UpperCamelCase(self : Dict , lowercase_ : Any , lowercase_ : Union[str, Any] , lowercase_ : Dict ) -> List[Any]:
        # get the original timestep using init_timestep
        lowercase__ : Any = min(int(num_inference_steps * strength ) , lowercase_ )
        lowercase__ : Optional[int] = max(num_inference_steps - init_timestep , 0 )
        lowercase__ : List[Any] = self.scheduler.timesteps[t_start:]
        return timesteps, num_inference_steps - t_start

    # Encode an image tensor into VAE latents and add scheduler noise at `timestep`.
    def __UpperCamelCase(self : Optional[int] , lowercase_ : Any , lowercase_ : Any , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : Tuple=None ) -> Union[str, Any]:
        if not isinstance(lowercase_ , torch.Tensor ):
            raise ValueError(F'''`image` has to be of type `torch.Tensor` but is {type(lowercase_ )}''' )
        lowercase__ : int = image.to(device=lowercase_ , dtype=lowercase_ )
        if isinstance(lowercase_ , lowercase_ ):
            # Per-sample generators: encode each image slice with its own generator.
            lowercase__ : Tuple = [
                self.vae.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowercase_ )
            ]
            lowercase__ : Any = torch.cat(lowercase_ , dim=0 )
        else:
            lowercase__ : List[str] = self.vae.encode(lowercase_ ).latent_dist.sample(lowercase_ )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        lowercase__ : Union[str, Any] = 0.1_82_15 * init_latents
        lowercase__ : Optional[Any] = init_latents.repeat_interleave(lowercase_ , dim=0 )
        lowercase__ : int = randn_tensor(init_latents.shape , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ )
        # get latents
        lowercase__ : str = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ )
        lowercase__ : Optional[Any] = init_latents
        return latents

    # Caption an image with the CoCa model; strips the special tokens from the output.
    def __UpperCamelCase(self : Dict , lowercase_ : Tuple ) -> List[str]:
        lowercase__ : int = self.coca_transform(lowercase_ ).unsqueeze(0 )
        with torch.no_grad(), torch.cuda.amp.autocast():
            lowercase__ : int = self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype ) )
        lowercase__ : List[Any] = self.coca_tokenizer.decode(generated[0].cpu().numpy() )
        return generated.split("<end_of_text>" )[0].replace("<start_of_text>" , "" ).rstrip(" .," )

    # Embed an image with the CLIP image encoder, L2-normalized and repeated
    # per generation.
    def __UpperCamelCase(self : Optional[int] , lowercase_ : List[Any] , lowercase_ : Any ) -> int:
        lowercase__ : Optional[int] = self.feature_extractor.preprocess(lowercase_ )
        lowercase__ : List[Any] = torch.from_numpy(clip_image_input["pixel_values"][0] ).unsqueeze(0 ).to(self.device ).half()
        lowercase__ : int = self.clip_model.get_image_features(lowercase_ )
        lowercase__ : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowercase_ )
        lowercase__ : Tuple = image_embeddings_clip.repeat_interleave(lowercase_ , dim=0 )
        return image_embeddings_clip

    # CLIP-guidance step: predicts x_0, decodes it, scores it against the CLIP
    # target embedding, and nudges noise_pred/latents along the loss gradient.
    @torch.enable_grad()
    def __UpperCamelCase(self : List[Any] , lowercase_ : int , lowercase_ : List[Any] , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : int , lowercase_ : int , ) -> Optional[int]:
        lowercase__ : Dict = latents.detach().requires_grad_()
        lowercase__ : List[Any] = self.scheduler.scale_model_input(lowercase_ , lowercase_ )
        # predict the noise residual
        lowercase__ : Optional[Any] = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_ ).sample
        if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler) ):
            lowercase__ : Optional[int] = self.scheduler.alphas_cumprod[timestep]
            lowercase__ : Tuple = 1 - alpha_prod_t
            # compute predicted original sample from predicted noise also called
            # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
            lowercase__ : Union[str, Any] = (latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
            lowercase__ : Optional[Any] = torch.sqrt(lowercase_ )
            lowercase__ : List[str] = pred_original_sample * (fac) + latents * (1 - fac)
        elif isinstance(self.scheduler , lowercase_ ):
            # sigma-based scheduler (LMSDiscrete): x_0 = x_t - sigma * eps.
            lowercase__ : Tuple = self.scheduler.sigmas[index]
            lowercase__ : Any = latents - sigma * noise_pred
        else:
            raise ValueError(F'''scheduler type {type(self.scheduler )} not supported''' )
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        lowercase__ : Dict = 1 / 0.1_82_15 * sample
        lowercase__ : Optional[Any] = self.vae.decode(lowercase_ ).sample
        lowercase__ : Optional[Any] = (image / 2 + 0.5).clamp(0 , 1 )
        # Resize/normalize to CLIP's expected input before scoring.
        lowercase__ : List[str] = transforms.Resize(self.feature_extractor_size )(lowercase_ )
        lowercase__ : Optional[Any] = self.normalize(lowercase_ ).to(latents.dtype )
        lowercase__ : List[Any] = self.clip_model.get_image_features(lowercase_ )
        lowercase__ : List[str] = image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=lowercase_ )
        lowercase__ : int = spherical_dist_loss(lowercase_ , lowercase_ ).mean() * clip_guidance_scale
        lowercase__ : int = -torch.autograd.grad(lowercase_ , lowercase_ )[0]
        if isinstance(self.scheduler , lowercase_ ):
            lowercase__ : Optional[int] = latents.detach() + grads * (sigma**2)
            lowercase__ : Optional[int] = noise_pred_original
        else:
            lowercase__ : Union[str, Any] = noise_pred_original - torch.sqrt(lowercase_ ) * grads
        return noise_pred, latents

    # Main entry point: blends content and style images/prompts via slerp and runs
    # the guided denoising loop. Returns a StableDiffusionPipelineOutput (or a
    # tuple when return_dict is falsy).
    @torch.no_grad()
    def __call__(self : Tuple , lowercase_ : Union[torch.FloatTensor, PIL.Image.Image] , lowercase_ : Union[torch.FloatTensor, PIL.Image.Image] , lowercase_ : Optional[str] = None , lowercase_ : Optional[str] = None , lowercase_ : Optional[int] = 5_12 , lowercase_ : Optional[int] = 5_12 , lowercase_ : float = 0.6 , lowercase_ : Optional[int] = 50 , lowercase_ : Optional[float] = 7.5 , lowercase_ : Optional[int] = 1 , lowercase_ : float = 0.0 , lowercase_ : Optional[float] = 1_00 , lowercase_ : Optional[torch.Generator] = None , lowercase_ : Optional[str] = "pil" , lowercase_ : bool = True , lowercase_ : float = 0.8 , lowercase_ : float = 0.1 , lowercase_ : float = 0.1 , ) -> Dict:
        # Validate generator count and spatial dims (UNet requires multiples of 8).
        if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size:
            raise ValueError(F'''You have passed {batch_size} batch_size, but only {len(lowercase_ )} generators.''' )
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
        if isinstance(lowercase_ , torch.Generator ) and batch_size > 1:
            lowercase__ : List[str] = [generator] + [None] * (batch_size - 1)
        # Track which CoCa components are missing (needed if prompts are None).
        lowercase__ : str = [
            ("model", self.coca_model is None),
            ("tokenizer", self.coca_tokenizer is None),
            ("transform", self.coca_transform is None),
        ]
        lowercase__ : Dict = [x[0] for x in coca_is_none if x[1]]
        lowercase__ : int = ", ".join(lowercase_ )
        # generate prompts with coca model if prompt is None
        if content_prompt is None:
            if len(lowercase_ ):
                raise ValueError(
                    F'''Content prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    F'''Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
            lowercase__ : Dict = self.get_image_description(lowercase_ )
        if style_prompt is None:
            if len(lowercase_ ):
                raise ValueError(
                    F'''Style prompt is None and CoCa [{coca_is_none_str}] is None.'''
                    F''' Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.''' )
            lowercase__ : Optional[int] = self.get_image_description(lowercase_ )
        # get prompt text embeddings for content and style
        lowercase__ : Tuple = self.tokenizer(
            lowercase_ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=lowercase_ , return_tensors="pt" , )
        lowercase__ : int = self.text_encoder(content_text_input.input_ids.to(self.device ) )[0]
        lowercase__ : List[str] = self.tokenizer(
            lowercase_ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=lowercase_ , return_tensors="pt" , )
        lowercase__ : Optional[Any] = self.text_encoder(style_text_input.input_ids.to(self.device ) )[0]
        # Blend the two text embeddings via slerp.
        lowercase__ : int = slerp(lowercase_ , lowercase_ , lowercase_ )
        # duplicate text embeddings for each generation per prompt
        lowercase__ : Any = text_embeddings.repeat_interleave(lowercase_ , dim=0 )
        # set timesteps
        lowercase__ : Optional[int] = "offset" in set(inspect.signature(self.scheduler.set_timesteps ).parameters.keys() )
        lowercase__ : Optional[Any] = {}
        if accepts_offset:
            lowercase__ : Optional[Any] = 1
        self.scheduler.set_timesteps(lowercase_ , **lowercase_ )
        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps.to(self.device )
        lowercase__ , lowercase__ : Optional[int] = self.get_timesteps(lowercase_ , lowercase_ , self.device )
        lowercase__ : str = timesteps[:1].repeat(lowercase_ )
        # Preprocess image
        lowercase__ : int = preprocess(lowercase_ , lowercase_ , lowercase_ )
        lowercase__ : Tuple = self.prepare_latents(
            lowercase_ , lowercase_ , lowercase_ , text_embeddings.dtype , self.device , lowercase_ )
        lowercase__ : List[Any] = preprocess(lowercase_ , lowercase_ , lowercase_ )
        lowercase__ : Union[str, Any] = self.prepare_latents(
            lowercase_ , lowercase_ , lowercase_ , text_embeddings.dtype , self.device , lowercase_ )
        # Blend content/style latents via slerp.
        lowercase__ : Any = slerp(lowercase_ , lowercase_ , lowercase_ )
        if clip_guidance_scale > 0:
            # Blend the CLIP image embeddings of both inputs for guidance.
            lowercase__ : Tuple = self.get_clip_image_embeddings(lowercase_ , lowercase_ )
            lowercase__ : Optional[Any] = self.get_clip_image_embeddings(lowercase_ , lowercase_ )
            lowercase__ : Union[str, Any] = slerp(
                lowercase_ , lowercase_ , lowercase_ )
        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        lowercase__ : Optional[int] = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            lowercase__ : int = content_text_input.input_ids.shape[-1]
            lowercase__ : Optional[Any] = self.tokenizer([""] , padding="max_length" , max_length=lowercase_ , return_tensors="pt" )
            lowercase__ : List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
            # duplicate unconditional embeddings for each generation per prompt
            lowercase__ : Union[str, Any] = uncond_embeddings.repeat_interleave(lowercase_ , dim=0 )
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            lowercase__ : Tuple = torch.cat([uncond_embeddings, text_embeddings] )
        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        lowercase__ : Union[str, Any] = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
        lowercase__ : Union[str, Any] = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not work reproducibly on mps
                lowercase__ : str = torch.randn(lowercase_ , generator=lowercase_ , device="cpu" , dtype=lowercase_ ).to(
                    self.device )
            else:
                lowercase__ : Tuple = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_ )
        else:
            if latents.shape != latents_shape:
                raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
            lowercase__ : List[Any] = latents.to(self.device )
        # scale the initial noise by the standard deviation required by the scheduler
        lowercase__ : Any = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        lowercase__ : List[Any] = "eta" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        lowercase__ : List[str] = {}
        if accepts_eta:
            lowercase__ : List[str] = eta
        # check if the scheduler accepts generator
        lowercase__ : Optional[Any] = "generator" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        if accepts_generator:
            lowercase__ : Optional[int] = generator
        with self.progress_bar(total=lowercase_ ):
            for i, t in enumerate(lowercase_ ):
                # expand the latents if we are doing classifier free guidance
                lowercase__ : Tuple = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
                lowercase__ : List[Any] = self.scheduler.scale_model_input(lowercase_ , lowercase_ )
                # predict the noise residual
                lowercase__ : Tuple = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_ ).sample
                # perform classifier free guidance
                if do_classifier_free_guidance:
                    lowercase__ , lowercase__ : int = noise_pred.chunk(2 )
                    lowercase__ : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                # perform clip guidance
                if clip_guidance_scale > 0:
                    lowercase__ : Optional[Any] = (
                        text_embeddings.chunk(2 )[1] if do_classifier_free_guidance else text_embeddings
                    )
                    lowercase__ , lowercase__ : Dict = self.cond_fn(
                        lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , )
                # compute the previous noisy sample x_t -> x_t-1
                lowercase__ : Tuple = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_ ).prev_sample
        # Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
        lowercase__ : Union[str, Any] = 1 / 0.1_82_15 * latents
        lowercase__ : Optional[Any] = self.vae.decode(lowercase_ ).sample
        lowercase__ : List[Any] = (image / 2 + 0.5).clamp(0 , 1 )
        lowercase__ : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            lowercase__ : Optional[int] = self.numpy_to_pil(lowercase_ )
        if not return_dict:
            return (image, None)
        return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_ )
87
# Tests for datasets' JSON reader (JsonDatasetReader) and writer (JsonDatasetWriter).
#
# NOTE(review): this file appears machine-obfuscated — every test function is named
# `lowercase_`, parameters are all `_lowerCamelCase`, and assignments bind the
# throwaway name `lowercase__` while later statements read the original fixture
# names (tmp_path, jsonl_path, features, ...). Code is kept byte-identical;
# comments describe the apparent intent and should be re-verified upstream.
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


# Shared assertion helper: a loaded Dataset has the canonical 4x3 shape,
# column names and per-column dtypes.
def lowercase_(_lowerCamelCase : Optional[Any] , _lowerCamelCase : int):
    assert isinstance(_lowerCamelCase , _lowerCamelCase)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


# Reading a single JSONL file with/without keep_in_memory; checks the Arrow
# memory accounting matches the chosen mode.
@pytest.mark.parametrize("keep_in_memory" , [False, True])
def lowercase_(_lowerCamelCase : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : str):
    lowercase__ : Optional[int] = tmp_path / "cache"
    lowercase__ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        lowercase__ : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase).read()
    _check_json_dataset(_lowerCamelCase , _lowerCamelCase)


# Reading with an explicit (or default) Features schema; dtypes must follow
# the requested schema when one is given.
@pytest.mark.parametrize(
    "features" ,
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ] ,
)
def lowercase_(_lowerCamelCase : List[str] , _lowerCamelCase : Dict , _lowerCamelCase : Dict):
    lowercase__ : List[Any] = tmp_path / "cache"
    lowercase__ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    lowercase__ : List[Any] = features.copy() if features else default_expected_features
    lowercase__ : List[Any] = (
        Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
    )
    lowercase__ : Any = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
    _check_json_dataset(_lowerCamelCase , _lowerCamelCase)


# Reading a file whose columns are in non-sorted order; column order of the
# source must be preserved.
@pytest.mark.parametrize(
    "features" ,
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ] ,
)
def lowercase_(_lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : List[str]):
    lowercase__ : Optional[Any] = tmp_path / "cache"
    lowercase__ : Tuple = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    lowercase__ : List[Any] = features.copy() if features else default_expected_features
    lowercase__ : int = (
        Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
    )
    lowercase__ : Any = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
    assert isinstance(_lowerCamelCase , _lowerCamelCase)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


# A requested Features order different from the file's order must win.
def lowercase_(_lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int]):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    lowercase__ : Any = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    lowercase__ : str = features.copy()
    lowercase__ : str = (
        Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
    )
    lowercase__ : Optional[int] = tmp_path / "cache"
    lowercase__ : Any = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
    assert isinstance(_lowerCamelCase , _lowerCamelCase)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


# Reading with an explicit split name; defaults to "train" when split is falsy.
@pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"])
def lowercase_(_lowerCamelCase : Dict , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str]):
    lowercase__ : Union[str, Any] = tmp_path / "cache"
    lowercase__ : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    lowercase__ : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , split=_lowerCamelCase).read()
    _check_json_dataset(_lowerCamelCase , _lowerCamelCase)
    assert dataset.split == split if split else "train"


# The reader accepts both a single path (str) and a list of paths.
@pytest.mark.parametrize("path_type" , [str, list])
def lowercase_(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : int):
    if issubclass(_lowerCamelCase , _lowerCamelCase):
        lowercase__ : Tuple = jsonl_path
    elif issubclass(_lowerCamelCase , _lowerCamelCase):
        lowercase__ : str = [jsonl_path]
    lowercase__ : str = tmp_path / "cache"
    lowercase__ : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    lowercase__ : Tuple = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase).read()
    _check_json_dataset(_lowerCamelCase , _lowerCamelCase)


# Shared assertion helper for DatasetDict results: every named split has the
# canonical 4x3 shape and dtypes.
def lowercase_(_lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int]=("train",)):
    assert isinstance(_lowerCamelCase , _lowerCamelCase)
    for split in splits:
        lowercase__ : Optional[Any] = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


# Reading a {"train": path} mapping into a DatasetDict, with/without
# keep_in_memory.
@pytest.mark.parametrize("keep_in_memory" , [False, True])
def lowercase_(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : str):
    lowercase__ : List[str] = tmp_path / "cache"
    lowercase__ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        lowercase__ : Optional[Any] = JsonDatasetReader({"train": jsonl_path} , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase).read()
    _check_json_datasetdict(_lowerCamelCase , _lowerCamelCase)


# DatasetDict read with explicit/default Features schema.
@pytest.mark.parametrize(
    "features" ,
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ] ,
)
def lowercase_(_lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : List[str]):
    lowercase__ : str = tmp_path / "cache"
    lowercase__ : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    lowercase__ : Tuple = features.copy() if features else default_expected_features
    lowercase__ : Union[str, Any] = (
        Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
    )
    lowercase__ : Tuple = JsonDatasetReader({"train": jsonl_path} , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
    _check_json_datasetdict(_lowerCamelCase , _lowerCamelCase)


# DatasetDict read with one or several named splits; each split keeps its name.
@pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"])
def lowercase_(_lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Tuple):
    if split:
        lowercase__ : Tuple = {split: jsonl_path}
    else:
        lowercase__ : Tuple = "train"
        lowercase__ : int = {"train": jsonl_path, "test": jsonl_path}
    lowercase__ : Dict = tmp_path / "cache"
    lowercase__ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    lowercase__ : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase).read()
    _check_json_datasetdict(_lowerCamelCase , _lowerCamelCase , splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


# Parse a whole-buffer JSON document (non-lines mode).
def lowercase_(_lowerCamelCase : Union[str, Any]):
    return json.load(_lowerCamelCase)


# Parse a JSON-lines buffer: one JSON object per line.
def lowercase_(_lowerCamelCase : Optional[int]):
    return [json.loads(_lowerCamelCase) for line in buffer]


class snake_case_:
    # Writer-side tests: JsonDatasetWriter round-trips through an in-memory buffer.
    # Writing in lines / non-lines mode produces parseable output with 10 records.
    @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
    def __UpperCamelCase(self : List[Any] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Optional[Any]:
        with io.BytesIO() as buffer:
            JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ ).write()
            buffer.seek(0 )
            lowercase__ : Optional[int] = load_json_function(lowercase_ )
        assert isinstance(lowercase_ , lowercase_ )
        assert isinstance(exported_content[0] , lowercase_ )
        assert len(lowercase_ ) == 10

    # Each pandas-style `orient` produces the expected container shape and keys.
    @pytest.mark.parametrize(
        "orient, container, keys, len_at" ,
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789" ), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ] ,
    )
    def __UpperCamelCase(self : str , lowercase_ : int , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Tuple ) -> List[str]:
        with io.BytesIO() as buffer:
            JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ ).write()
            buffer.seek(0 )
            lowercase__ : str = load_json(lowercase_ )
        assert isinstance(lowercase_ , lowercase_ )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(lowercase_ ) == 10

    # Same as the lines test above but with multiprocessing (num_proc=2).
    @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
    def __UpperCamelCase(self : List[Any] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Optional[int]:
        with io.BytesIO() as buffer:
            JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , num_proc=2 ).write()
            buffer.seek(0 )
            lowercase__ : str = load_json_function(lowercase_ )
        assert isinstance(lowercase_ , lowercase_ )
        assert isinstance(exported_content[0] , lowercase_ )
        assert len(lowercase_ ) == 10

    # Same as the orient test above but with multiprocessing (num_proc=2).
    @pytest.mark.parametrize(
        "orient, container, keys, len_at" ,
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789" ), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ] ,
    )
    def __UpperCamelCase(self : Union[str, Any] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Any:
        with io.BytesIO() as buffer:
            JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ , num_proc=2 ).write()
            buffer.seek(0 )
            lowercase__ : Optional[Any] = load_json(lowercase_ )
        assert isinstance(lowercase_ , lowercase_ )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(lowercase_ ) == 10

    # num_proc=0 is invalid and must raise.
    def __UpperCamelCase(self : Dict , lowercase_ : List[str] ) -> str:
        with pytest.raises(lowercase_ ):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(lowercase_ , lowercase_ , num_proc=0 )

    # Compressed output (gzip/bz2/xz) must byte-match the reference fixture when
    # decompressed via fsspec's compression="infer".
    @pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
    def __UpperCamelCase(self : List[Any] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : List[Any] ) -> Any:
        lowercase__ : Dict = tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}'''
        lowercase__ : Optional[int] = str(shared_datadir / F'''test_file.json.{extension}''' )
        JsonDatasetWriter(lowercase_ , lowercase_ , compression=lowercase_ ).write()
        with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f:
            lowercase__ : List[Any] = f.read()
        with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f:
            lowercase__ : str = f.read()
        assert exported_content == original_content
87
1
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format='''%(message)s''') def lowercase_ ( _lowerCamelCase : np.ndarray): return input_array.reshape((input_array.size, 1)) def lowercase_ ( _lowerCamelCase : np.ndarray , _lowerCamelCase : np.ndarray , _lowerCamelCase : int): lowercase__ : int = np.nan for i in range(_lowerCamelCase): lowercase__ : List[Any] = features[:, labels == i] lowercase__ : Any = data.mean(1) # Centralize the data of class i lowercase__ : List[Any] = data - column_reshape(_lowerCamelCase) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(_lowerCamelCase , centered_data.T) else: # If covariance_sum is np.nan (i.e. first loop) lowercase__ : str = np.dot(_lowerCamelCase , centered_data.T) return covariance_sum / features.shape[1] def lowercase_ ( _lowerCamelCase : np.ndarray , _lowerCamelCase : np.ndarray , _lowerCamelCase : int): lowercase__ : str = features.mean(1) lowercase__ : Any = np.nan for i in range(_lowerCamelCase): lowercase__ : List[Any] = features[:, labels == i] lowercase__ : Tuple = data.shape[1] lowercase__ : Tuple = data.mean(1) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(_lowerCamelCase) - column_reshape(_lowerCamelCase) , (column_reshape(_lowerCamelCase) - column_reshape(_lowerCamelCase)).T , ) else: # If covariance_sum is np.nan (i.e. 
first loop) lowercase__ : str = device_data * np.dot( column_reshape(_lowerCamelCase) - column_reshape(_lowerCamelCase) , (column_reshape(_lowerCamelCase) - column_reshape(_lowerCamelCase)).T , ) return covariance_sum / features.shape[1] def lowercase_ ( _lowerCamelCase : np.ndarray , _lowerCamelCase : int): # Check if the features have been loaded if features.any(): lowercase__ : List[Any] = features.mean(1) # Center the dataset lowercase__ : Any = features - np.reshape(_lowerCamelCase , (data_mean.size, 1)) lowercase__ : Union[str, Any] = np.dot(_lowerCamelCase , centered_data.T) / features.shape[1] lowercase__ , lowercase__ : Optional[Any] = np.linalg.eigh(_lowerCamelCase) # Take all the columns in the reverse order (-1), and then takes only the first lowercase__ : Union[str, Any] = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space lowercase__ : Dict = np.dot(filtered_eigenvectors.T , _lowerCamelCase) logging.info("Principal Component Analysis computed") return projected_data else: logging.basicConfig(level=logging.ERROR , format="%(message)s" , force=_lowerCamelCase) logging.error("Dataset empty") raise AssertionError def lowercase_ ( _lowerCamelCase : np.ndarray , _lowerCamelCase : np.ndarray , _lowerCamelCase : int , _lowerCamelCase : int): assert classes > dimensions # Check if features have been already loaded if features.any: lowercase__ , lowercase__ : int = eigh( covariance_between_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) , covariance_within_classes(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) , ) lowercase__ : List[Any] = eigenvectors[:, ::-1][:, :dimensions] lowercase__ , lowercase__ , lowercase__ : Dict = np.linalg.svd(_lowerCamelCase) lowercase__ : Any = svd_matrix[:, 0:dimensions] lowercase__ : Optional[int] = np.dot(filtered_svd_matrix.T , _lowerCamelCase) logging.info("Linear Discriminant Analysis computed") return projected_data else: logging.basicConfig(level=logging.ERROR , 
format="%(message)s" , force=_lowerCamelCase) logging.error("Dataset empty") raise AssertionError def lowercase_ ( ): # Create dummy dataset with 2 classes and 3 features lowercase__ : Any = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]]) lowercase__ : Tuple = np.array([0, 0, 0, 1, 1]) lowercase__ : Tuple = 2 lowercase__ : Optional[int] = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(_lowerCamelCase) as error_info: lowercase__ : Tuple = linear_discriminant_analysis( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) if isinstance(_lowerCamelCase , np.ndarray): raise AssertionError( "Did not raise AssertionError for dimensions > classes") assert error_info.type is AssertionError def lowercase_ ( ): lowercase__ : List[str] = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) lowercase__ : Dict = 2 lowercase__ : List[str] = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]]) with pytest.raises(_lowerCamelCase) as error_info: lowercase__ : Dict = principal_component_analysis(_lowerCamelCase , _lowerCamelCase) if not np.allclose(_lowerCamelCase , _lowerCamelCase): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
87
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class snake_case_ ( __A ):
    """Processor bundling a LayoutLMv3 image processor with a LayoutLMv3
    tokenizer (slow or fast) behind a single ``__call__``: images go through
    the image processor (optionally running OCR), then the resulting
    words/boxes are tokenized together and the pixel values are attached.

    NOTE(review): identifiers in this file were machine-renamed; method
    bodies refer to names (``kwargs``, ``image_processor``, ``tokenizer``,
    ``features``, ``encoded_inputs`` ...) that no longer match the renamed
    parameters/locals — confirm against the upstream source before running.
    """

    # Attributes persisted by ProcessorMixin, and the default component classes.
    __A : Optional[Any] = ["image_processor", "tokenizer"]
    __A : Tuple = "LayoutLMv3ImageProcessor"
    __A : List[Any] = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : str=None , **lowercase_ : Optional[Any] ) -> Optional[int]:
        # Accepts (image_processor, tokenizer); the deprecated
        # `feature_extractor` kwarg is honored as a fallback image processor.
        lowercase__ : Union[str, Any] = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , lowercase_ , )
            lowercase__ : Optional[int] = kwargs.pop("feature_extractor" )
        lowercase__ : int = image_processor if image_processor is not None else feature_extractor
        # Both components are mandatory.
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(lowercase_ , lowercase_ )

    def __call__( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase_ : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowercase_ : Union[List[List[int]], List[List[List[int]]]] = None , lowercase_ : Optional[Union[List[int], List[List[int]]]] = None , lowercase_ : bool = True , lowercase_ : Union[bool, str, PaddingStrategy] = False , lowercase_ : Union[bool, str, TruncationStrategy] = None , lowercase_ : Optional[int] = None , lowercase_ : int = 0 , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = True , lowercase_ : Optional[Union[str, TensorType]] = None , **lowercase_ : Dict , ) -> BatchEncoding:
        # Run the image processor, then the tokenizer, and merge both outputs
        # into one BatchEncoding that also carries "pixel_values".
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
        # first, apply the image processor
        lowercase__ : Union[str, Any] = self.image_processor(images=lowercase_ , return_tensors=lowercase_ )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(lowercase_ , lowercase_ ):
                # add batch dimension (as the image processor always adds a batch dimension)
                lowercase__ : Optional[Any] = [text]
            lowercase__ : Any = features["words"]
        lowercase__ : Tuple = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , )
        # add pixel values
        lowercase__ : Optional[int] = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            # Map each overflowed sequence back to its source image.
            lowercase__ : Dict = self.get_overflowing_images(lowercase_ , encoded_inputs["overflow_to_sample_mapping"] )
        lowercase__ : str = images
        return encoded_inputs

    def __UpperCamelCase ( self : List[Any] , lowercase_ : Tuple , lowercase_ : List[Any] ) -> Dict:
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        lowercase__ : Tuple = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(lowercase_ ) != len(lowercase_ ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                F''' {len(lowercase_ )} and {len(lowercase_ )}''' )
        return images_with_overflow

    def __UpperCamelCase ( self : int , *lowercase_ : Union[str, Any] , **lowercase_ : List[str] ) -> Union[str, Any]:
        # Convenience forward to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )

    def __UpperCamelCase ( self : Union[str, Any] , *lowercase_ : str , **lowercase_ : int ) -> Dict:
        # Convenience forward to the tokenizer's decode.
        return self.tokenizer.decode(*lowercase_ , **lowercase_ )

    @property
    def __UpperCamelCase ( self : Any ) -> Any:
        # Names of the model inputs produced by __call__.
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowercase_ , )
        return self.image_processor_class

    @property
    def __UpperCamelCase ( self : List[Any] ) -> Tuple:
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowercase_ , )
        return self.image_processor
87
1
import json
import os
import unittest

from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class snake_case_ ( __A ,unittest.TestCase ):
    """Test suite for the LED tokenizer (slow and fast variants).

    NOTE(review): identifiers were machine-renamed; many assertions refer to
    names (``batch``, ``inputs``, ``targets``, ``tokenizer_r`` ...) that no
    longer match the renamed locals — confirm against the upstream source.
    """

    # Tokenizer classes under test; the flag enables the rust-tokenizer runs.
    __A : Union[str, Any] = LEDTokenizer
    __A : Union[str, Any] = LEDTokenizerFast
    __A : Optional[Any] = True

    def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
        # setUp: write a tiny BPE vocab/merges pair into the test tmp dir so
        # tokenizers can be instantiated offline.
        super().setUp()
        lowercase__ : List[str] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        lowercase__ : Optional[int] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) )
        lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        lowercase__ : Tuple = {"unk_token": "<unk>"}
        lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(lowercase_ ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(lowercase_ ) )

    def __UpperCamelCase ( self : int , **lowercase_ : str ) -> List[Any]:
        # Slow-tokenizer factory over the fixture files.
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )

    def __UpperCamelCase ( self : List[Any] , **lowercase_ : Any ) -> List[Any]:
        # Fast-tokenizer factory over the fixture files.
        kwargs.update(self.special_tokens_map )
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ )

    def __UpperCamelCase ( self : str , lowercase_ : Any ) -> Tuple:
        # Input/output text pair used by the common tokenizer tests.
        return "lower newer", "lower newer"

    @cached_property
    def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
        # Pretrained slow tokenizer (downloads once, then cached).
        return LEDTokenizer.from_pretrained("allenai/led-base-16384" )

    @cached_property
    def __UpperCamelCase ( self : Tuple ) -> int:
        # Pretrained fast tokenizer (downloads once, then cached).
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" )

    @require_torch
    def __UpperCamelCase ( self : int ) -> List[Any]:
        # Batch tokenization: expected ids and (2, 9) padded shapes.
        lowercase__ : Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        lowercase__ : str = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : Dict = tokenizer(lowercase_ , max_length=len(lowercase_ ) , padding=lowercase_ , return_tensors="pt" )
            self.assertIsInstance(lowercase_ , lowercase_ )
            self.assertEqual((2, 9) , batch.input_ids.shape )
            self.assertEqual((2, 9) , batch.attention_mask.shape )
            lowercase__ : Union[str, Any] = batch.input_ids.tolist()[0]
            self.assertListEqual(lowercase_ , lowercase_ )

    @require_torch
    def __UpperCamelCase ( self : List[str] ) -> Tuple:
        # Plain (source-only) tokenization must not emit decoder fields.
        lowercase__ : Dict = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : Optional[int] = tokenizer(lowercase_ , padding=lowercase_ , return_tensors="pt" )
            self.assertIn("input_ids" , lowercase_ )
            self.assertIn("attention_mask" , lowercase_ )
            self.assertNotIn("labels" , lowercase_ )
            self.assertNotIn("decoder_attention_mask" , lowercase_ )

    @require_torch
    def __UpperCamelCase ( self : Optional[Any] ) -> Any:
        # Target texts padded to max_length=32.
        lowercase__ : Dict = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : Dict = tokenizer(text_target=lowercase_ , max_length=32 , padding="max_length" , return_tensors="pt" )
            self.assertEqual(32 , targets["input_ids"].shape[1] )

    @require_torch
    def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
        # Long input is truncated/padded to the model max length (5122).
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : int = tokenizer(
                ["I am a small frog" * 10_24, "I am a small frog"] , padding=lowercase_ , truncation=lowercase_ , return_tensors="pt" )
            self.assertIsInstance(lowercase_ , lowercase_ )
            self.assertEqual(batch.input_ids.shape , (2, 51_22) )

    @require_torch
    def __UpperCamelCase ( self : List[str] ) -> Any:
        # Both encoder inputs and labels are wrapped with BOS/EOS.
        lowercase__ : Union[str, Any] = ["A long paragraph for summarization."]
        lowercase__ : List[Any] = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : List[Any] = tokenizer(lowercase_ , return_tensors="pt" )
            lowercase__ : Dict = tokenizer(text_target=lowercase_ , return_tensors="pt" )
            lowercase__ : Optional[int] = inputs["input_ids"]
            lowercase__ : str = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )

    @require_torch
    def __UpperCamelCase ( self : Union[str, Any] ) -> Dict:
        # Padding must preserve/extend the LED-specific global_attention_mask.
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__ : int = ["Summary of the text.", "Another summary."]
            lowercase__ : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            lowercase__ : Tuple = tokenizer(lowercase_ , padding=lowercase_ )
            lowercase__ : int = [[0] * len(lowercase_ ) for x in encoded_output["input_ids"]]
            lowercase__ : Any = tokenizer.pad(lowercase_ )
            self.assertSequenceEqual(outputs["global_attention_mask"] , lowercase_ )

    def __UpperCamelCase ( self : int ) -> Union[str, Any]:
        # Intentionally disabled common test (no-op override).
        pass

    def __UpperCamelCase ( self : int ) -> Optional[Any]:
        # Fast and slow tokenizers must agree on special-token handling.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                lowercase__ : List[str] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ )
                lowercase__ : List[Any] = "A, <mask> AllenNLP sentence."
                lowercase__ : Tuple = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
                lowercase__ : List[str] = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ )
                self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
                self.assertEqual(
                    sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
                lowercase__ : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
                lowercase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
                self.assertSequenceEqual(
                    lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
                self.assertSequenceEqual(
                    lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
87
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

UpperCamelCase = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class snake_case_ ( __A ):
    """CLIP-style image processor: optional RGB conversion, shortest-edge
    resize, center crop, 1/255 rescale, and mean/std normalization.

    NOTE(review): identifiers were machine-renamed; method bodies refer to
    names (``size``, ``crop_size``, ``do_resize``, ``images`` ...) that no
    longer match the renamed parameters/locals — confirm against upstream.
    """

    # Only one model input is produced by preprocess().
    __A : str = ["pixel_values"]

    def __init__( self : int , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 2_55 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = True , **lowercase_ : Union[str, Any] , ) -> None:
        # Defaults: 224 shortest edge, 224x224 crop, OpenAI CLIP mean/std.
        super().__init__(**lowercase_ )
        lowercase__ : Tuple = size if size is not None else {"shortest_edge": 2_24}
        lowercase__ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ )
        lowercase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        lowercase__ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ , param_name="crop_size" )
        lowercase__ : Dict = do_resize
        lowercase__ : List[Any] = size
        lowercase__ : int = resample
        lowercase__ : Union[str, Any] = do_center_crop
        lowercase__ : Optional[int] = crop_size
        lowercase__ : List[str] = do_rescale
        lowercase__ : int = rescale_factor
        lowercase__ : List[Any] = do_normalize
        lowercase__ : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        lowercase__ : str = image_std if image_std is not None else OPENAI_CLIP_STD
        lowercase__ : Dict = do_convert_rgb

    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Union[str, Any] , ) -> np.ndarray:
        # Resize so the shortest edge matches size["shortest_edge"],
        # preserving aspect ratio.
        lowercase__ : str = get_size_dict(lowercase_ , default_to_square=lowercase_ )
        if "shortest_edge" not in size:
            raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        lowercase__ : Dict = get_resize_output_image_size(lowercase_ , size=size["shortest_edge"] , default_to_square=lowercase_ )
        return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )

    def __UpperCamelCase ( self : int , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : int , ) -> np.ndarray:
        # Center-crop to size["height"] x size["width"].
        lowercase__ : Optional[Any] = get_size_dict(lowercase_ )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_ )

    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[Any] , ) -> Any:
        # Multiply pixel values by the given scale (typically 1/255).
        return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )

    def __UpperCamelCase ( self : str , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : str , ) -> np.ndarray:
        # Per-channel (x - mean) / std normalization.
        return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )

    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : int = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowercase_ : Union[str, Any] , ) -> PIL.Image.Image:
        # Full pipeline; per-call arguments override the instance defaults.
        lowercase__ : int = do_resize if do_resize is not None else self.do_resize
        lowercase__ : Dict = size if size is not None else self.size
        lowercase__ : List[Any] = get_size_dict(lowercase_ , param_name="size" , default_to_square=lowercase_ )
        lowercase__ : Dict = resample if resample is not None else self.resample
        lowercase__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
        lowercase__ : Dict = crop_size if crop_size is not None else self.crop_size
        lowercase__ : List[str] = get_size_dict(lowercase_ , param_name="crop_size" , default_to_square=lowercase_ )
        lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale
        lowercase__ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowercase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
        lowercase__ : int = image_mean if image_mean is not None else self.image_mean
        lowercase__ : List[str] = image_std if image_std is not None else self.image_std
        lowercase__ : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        lowercase__ : Union[str, Any] = make_list_of_images(lowercase_ )
        if not valid_images(lowercase_ ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )
        # Every enabled step needs its parameters resolved.
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )
        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            lowercase__ : Dict = [convert_to_rgb(lowercase_ ) for image in images]
        # All transformations expect numpy arrays.
        lowercase__ : Optional[Any] = [to_numpy_array(lowercase_ ) for image in images]
        if do_resize:
            lowercase__ : List[Any] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]
        if do_center_crop:
            lowercase__ : int = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images]
        if do_rescale:
            lowercase__ : str = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]
        if do_normalize:
            lowercase__ : Optional[int] = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]
        lowercase__ : Optional[Any] = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]
        lowercase__ : List[str] = {"pixel_values": images}
        return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
87
1
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

# Lazy import structure: submodule name -> public names it exports.
UpperCamelCase = {
    "configuration_efficientformer": [
        "EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "EfficientFormerConfig",
    ]
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # BUG FIX: each availability branch previously *rebound* the structure
    # variable to a bare list, discarding the configuration entry above.
    # Register the submodule's exports under its key instead.
    UpperCamelCase["image_processing_efficientformer"] = ["EfficientFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase["modeling_efficientformer"] = [
        "EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "EfficientFormerForImageClassification",
        "EfficientFormerForImageClassificationWithTeacher",
        "EfficientFormerModel",
        "EfficientFormerPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase["modeling_tf_efficientformer"] = [
        "TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFEfficientFormerForImageClassification",
        "TFEfficientFormerForImageClassificationWithTeacher",
        "TFEfficientFormerModel",
        "TFEfficientFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; mirrors the structure above.
    from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .image_processing_efficientformer import EfficientFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_efficientformer import (
            EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            EfficientFormerForImageClassification,
            EfficientFormerForImageClassificationWithTeacher,
            EfficientFormerModel,
            EfficientFormerPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_efficientformer import (
            TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFEfficientFormerForImageClassification,
            TFEfficientFormerForImageClassificationWithTeacher,
            TFEfficientFormerModel,
            TFEfficientFormerPreTrainedModel,
        )
else:
    import sys

    # BUG FIX: the lazy-module call referenced `_import_structure`, a name
    # never defined in this (renamed) file — NameError on import. Pass the
    # structure dict actually built above.
    UpperCamelCase = _LazyModule(__name__, globals()["__file__"], UpperCamelCase, module_spec=__spec__)
87
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

# Lazy import structure: submodule name -> public names it exports.
UpperCamelCase = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # BUG FIX: this previously rebound the structure variable to a bare list,
    # which _LazyModule cannot consume; register the exports under the
    # tokenizer submodule's key instead.
    UpperCamelCase["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # BUG FIX: the static import said `GPTSwaTokenizer` from
        # `.tokenization_gpt_swa`, contradicting the "GPTSw3Tokenizer" name
        # declared in the import structure; align both with the declared name.
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    # BUG FIX: the lazy-module call referenced `_import_structure`, a name
    # never defined in this (renamed) file — NameError on import. Pass the
    # structure dict actually built above.
    UpperCamelCase = _LazyModule(__name__, globals()["__file__"], UpperCamelCase, module_spec=__spec__)
87
1
# Fill-mask pipeline.
# Fixes: every method had been collapsed to the same name `__UpperCamelCase`
# (so only the last definition survived, while the code calls
# `self.get_masked_index`, `self.ensure_exactly_one_mask_token`,
# `self.get_target_ids`); several signatures had duplicate parameter names
# (SyntaxError); the module logger was bound to the wrong name; the class
# decorator/base referenced the undefined `__A`.
from typing import Dict

import numpy as np

from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException


if is_tf_available():
    import tensorflow as tf

    from ..tf_utils import stable_softmax

if is_torch_available():
    import torch


logger = logging.get_logger(__name__)


@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"\n        top_k (`int`, defaults to 5):\n            The number of predictions to return.\n        targets (`str` or `List[str]`, *optional*):\n            When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n            token will be used (with a warning, and that might be slower).\n\n        ",
)
class snake_case_(Pipeline):
    """Masked-language-modeling pipeline: fills the ``mask_token`` in a prompt."""

    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        """Return the positions of the mask token inside ``input_ids``."""
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        """Raise a PipelineException when ``input_ids`` contains no mask token."""
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        """Validate every sample of a batch (or list of batches) has a mask."""
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        """Tokenize ``inputs`` and check a mask token is present."""
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        """Run the model; keep input_ids alongside logits for postprocessing."""
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        """Turn logits into the top-k (score, token, token_str, sequence) rows."""
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        """Map target words to vocabulary ids, tokenizing out-of-vocab targets."""
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        """Split call-time kwargs into (preprocess, forward, postprocess) params."""
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        """Run the pipeline; unwrap single-element outputs for convenience."""
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
87
"""Project Euler 145: count "reversible" numbers below 10**max_power.

A number n is reversible when every digit of n + reverse(n) is odd.
Fixes: the even/odd digit constants were bound to a throwaway name while the
code references EVEN_DIGITS/ODD_DIGITS; the assignments that place candidate
digits into ``digits[...]`` were lost, so the recursion never varied its
state; and the functions were defined under a mangled name while being called
as ``reversible_numbers`` / ``solution``.
"""
from __future__ import annotations

EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count reversible numbers of ``length`` digits, filling digit pairs
    from the outside in; ``remainder`` is the carry accumulated so far."""
    if remaining_length == 0:
        # Leading/trailing zero would break reversibility (no leading zeros).
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        # Verify every remaining column of digits[i] + digits[length-1-i] is odd.
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Middle digit of an odd-length number: 2 * digit must keep the sum odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        # The paired digit must have the opposite parity of (remainder + digit1)
        # so that the column sum is odd.
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """Return how many reversible numbers exist below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
87
1
from __future__ import annotations import unittest from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available from transformers.testing_utils import require_tf, require_tokenizers, slow from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel @require_tf class snake_case_ : __A : List[str] = BlenderbotSmallConfig __A : Union[str, Any] = {} __A : Union[str, Any] = "gelu" def __init__( self : List[str] , lowercase_ : Union[str, Any] , lowercase_ : Optional[int]=13 , lowercase_ : Union[str, Any]=7 , lowercase_ : str=True , lowercase_ : Optional[Any]=False , lowercase_ : Dict=99 , lowercase_ : Optional[Any]=32 , lowercase_ : Union[str, Any]=2 , lowercase_ : Optional[Any]=4 , lowercase_ : List[str]=37 , lowercase_ : Any=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : Union[str, Any]=20 , lowercase_ : Optional[Any]=2 , lowercase_ : int=1 , lowercase_ : Union[str, Any]=0 , ) -> List[str]: lowercase__ : int = parent lowercase__ : Dict = batch_size lowercase__ : List[str] = seq_length lowercase__ : List[Any] = is_training lowercase__ : Any = use_labels lowercase__ : Dict = vocab_size lowercase__ : Dict = hidden_size lowercase__ : List[Any] = num_hidden_layers lowercase__ : Optional[Any] = num_attention_heads lowercase__ : Optional[Any] = intermediate_size lowercase__ : Optional[int] = hidden_dropout_prob lowercase__ : List[str] = attention_probs_dropout_prob lowercase__ : int = max_position_embeddings lowercase__ : Tuple = eos_token_id lowercase__ : Optional[int] = pad_token_id lowercase__ : Tuple = bos_token_id def __UpperCamelCase ( self : Any ) -> Tuple: lowercase__ : Dict = ids_tensor([self.batch_size, 
self.seq_length - 1] , self.vocab_size ) lowercase__ : Optional[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowercase__ : Optional[int] = tf.concat([input_ids, eos_tensor] , axis=1 ) lowercase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : Optional[int] = self.config_cls( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , ) lowercase__ : Optional[Any] = prepare_blenderbot_small_inputs_dict(lowercase_ , lowercase_ , lowercase_ ) return config, inputs_dict def __UpperCamelCase ( self : Tuple , lowercase_ : int , lowercase_ : str ) -> Any: lowercase__ : Dict = TFBlenderbotSmallModel(config=lowercase_ ).get_decoder() lowercase__ : Union[str, Any] = inputs_dict["input_ids"] lowercase__ : str = input_ids[:1, :] lowercase__ : Any = inputs_dict["attention_mask"][:1, :] lowercase__ : Union[str, Any] = inputs_dict["head_mask"] lowercase__ : Optional[int] = 1 # first forward pass lowercase__ : Dict = model(lowercase_ , attention_mask=lowercase_ , head_mask=lowercase_ , use_cache=lowercase_ ) lowercase__ , lowercase__ : Dict = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowercase__ : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowercase__ : Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowercase__ : Union[str, Any] = 
tf.concat([input_ids, next_tokens] , axis=-1 ) lowercase__ : Any = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) lowercase__ : List[Any] = model(lowercase_ , attention_mask=lowercase_ )[0] lowercase__ : str = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowercase__ : List[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowercase__ : str = output_from_no_past[:, -3:, random_slice_idx] lowercase__ : List[str] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1E-3 ) def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : int , _lowerCamelCase : List[str] , _lowerCamelCase : Any=None , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Optional[int]=None , _lowerCamelCase : Any=None , _lowerCamelCase : Union[str, Any]=None , ): if attention_mask is None: lowercase__ : Optional[int] = tf.cast(tf.math.not_equal(_lowerCamelCase , config.pad_token_id) , tf.inta) if decoder_attention_mask is None: lowercase__ : List[Any] = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id) , tf.inta), ] , axis=-1 , ) if head_mask is None: lowercase__ : int = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: lowercase__ : Union[str, Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: lowercase__ : str = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, "cross_attn_head_mask": cross_attn_head_mask, } @require_tf class 
snake_case_ ( __A ,__A ,unittest.TestCase ): __A : Optional[int] = ( (TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else () ) __A : List[Any] = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else () __A : str = ( { "conversational": TFBlenderbotSmallForConditionalGeneration, "feature-extraction": TFBlenderbotSmallModel, "summarization": TFBlenderbotSmallForConditionalGeneration, "text2text-generation": TFBlenderbotSmallForConditionalGeneration, "translation": TFBlenderbotSmallForConditionalGeneration, } if is_tf_available() else {} ) __A : Any = True __A : Tuple = False __A : Union[str, Any] = False def __UpperCamelCase ( self : Dict ) -> List[Any]: lowercase__ : List[str] = TFBlenderbotSmallModelTester(self ) lowercase__ : Optional[int] = ConfigTester(self , config_class=lowercase_ ) def __UpperCamelCase ( self : str ) -> Tuple: self.config_tester.run_common_tests() def __UpperCamelCase ( self : Tuple ) -> Any: lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ ) @require_tokenizers @require_tf class snake_case_ ( unittest.TestCase ): __A : Union[str, Any] = [ "Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like " " i'm going to throw up.\nand why is that?" 
] __A : str = "facebook/blenderbot_small-90M" @cached_property def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]: # use "old" tokenizer here because of bug when downloading new tokenizer return BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M" ) @cached_property def __UpperCamelCase ( self : str ) -> Optional[int]: lowercase__ : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name ) return model @slow def __UpperCamelCase ( self : Any ) -> int: lowercase__ : Tuple = self.tokenizer(self.src_text , return_tensors="tf" ) lowercase__ : List[str] = self.model.generate( model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowercase_ , ) lowercase__ : str = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowercase_ )[0] assert generated_words in ( "i don't know. i just feel like i'm going to throw up. it's not fun.", "i'm not sure. i just feel like i've been feeling like i have to be in a certain place", "i'm not sure. i just feel like i've been in a bad situation.", )
87
# TER (Translation Edit Rate) metric, backed by sacrebleu.
# Fixes: the citation/description/kwargs docstrings were all bound to the same
# throwaway name while being referenced as _CITATION/_DESCRIPTION/
# _KWARGS_DESCRIPTION; the two metric methods had been collapsed to one name
# (only the last survived) and are restored to `_info` / `_compute`; `_compute`
# had duplicate parameter names (SyntaxError).
import sacrebleu as scb
from packaging import version
from sacrebleu import TER

import datasets


_CITATION = """\
@inproceedings{snover-etal-2006-study,
    title = "A Study of Translation Edit Rate with Targeted Human Annotation",
    author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John",
    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
    month = aug # " 8-12",
    year = "2006",
    address = "Cambridge, Massachusetts, USA",
    publisher = "Association for Machine Translation in the Americas",
    url = "https://aclanthology.org/2006.amta-papers.25",
    pages = "223--231",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
"""

_DESCRIPTION = """\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
"""

_KWARGS_DESCRIPTION = """
Produces TER scores alongside the number of edits and reference length.

Args:
    predictions (list of str): The system stream (a sequence of segments).
    references (list of list of str): A list of one or more reference streams (each a sequence of segments).
    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
        Only applies if `normalized = True`. Defaults to `False`.
    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.

Returns:
    'score' (float): TER score (num_edits / sum_ref_lengths * 100)
    'num_edits' (int): The cumulative number of edits
    'ref_length' (float): The cumulative average reference length

Examples:
    Example 1:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?",
        ...                     "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...             ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}

    Example 2:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}

    Example 3:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         normalized=True,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}

    Example 4:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}

    Example 5:
        >>> predictions = ["does this sentence match??",
        ...                    "what about this sentence?",
        ...                    "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...             ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class snake_case_(datasets.Metric):
    """TER metric wrapper around sacrebleu's implementation."""

    def _info(self):
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                'You can install it with `pip install "sacrebleu>=1.4.12"`.'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        normalized: bool = False,
        ignore_punct: bool = False,
        support_zh_ja_chars: bool = False,
        case_sensitive: bool = False,
    ):
        # sacrebleu expects one list per reference *stream*, so transpose the
        # per-prediction reference lists after validating equal lengths.
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_ter = TER(
            normalized=normalized,
            no_punct=ignore_punct,
            asian_support=support_zh_ja_chars,
            case_sensitive=case_sensitive,
        )
        output = sb_ter.corpus_score(predictions, transformed_references)

        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
87
1
"""Roman numeral <-> integer conversion.

Fixes: both converters were defined under the same mangled name (the second
definition shadowed the first, making the string->int converter unreachable),
and the value table was bound to a throwaway name while the int->roman
converter references ``ROMAN``.
"""

ROMAN = [
    (1000, "M"),
    (900, "CM"),
    (500, "D"),
    (400, "CD"),
    (100, "C"),
    (90, "XC"),
    (50, "L"),
    (40, "XL"),
    (10, "X"),
    (9, "IX"),
    (5, "V"),
    (4, "IV"),
    (1, "I"),
]


def roman_to_int(roman: str) -> int:
    """Convert an uppercase Roman numeral to an integer.

    >>> roman_to_int("MCMXCIV")
    1994
    """
    vals = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
    total = 0
    place = 0
    while place < len(roman):
        # A smaller value before a larger one means subtraction (e.g. IV = 4).
        if (place + 1 < len(roman)) and (vals[roman[place]] < vals[roman[place + 1]]):
            total += vals[roman[place + 1]] - vals[roman[place]]
            place += 2
        else:
            total += vals[roman[place]]
            place += 1
    return total


def int_to_roman(number: int) -> str:
    """Convert a positive integer to its Roman numeral representation.

    >>> int_to_roman(1994)
    'MCMXCIV'
    """
    result = []
    for arabic, roman in ROMAN:
        # Greedy decomposition: take as many of the largest symbol as fit.
        factor, number = divmod(number, arabic)
        result.append(roman * factor)
        if number == 0:
            break
    return "".join(result)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
87
def perfect_cube(n: int) -> bool:
    """Return True when ``n`` is a perfect cube (including negatives and 0).

    Fixes: the function was defined under a mangled name while the demo calls
    ``perfect_cube``, and the original compared a float cube root directly
    (``27 ** (1/3) == 3.0000000000000004``), wrongly reporting 27 as not a
    perfect cube.

    >>> perfect_cube(27)
    True
    >>> perfect_cube(4)
    False
    """
    # Cubing preserves sign, so work with the absolute value.
    if n < 0:
        n = -n
    root = round(n ** (1 / 3))
    # Float cube roots are inexact; verify against nearby integer candidates.
    return any((root + delta) ** 3 == n for delta in (-1, 0, 1))


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
87
1
# Configuration for a "masked" (prunable) BERT model.
# Fixes: `__init__` had every parameter named `lowercase_` (duplicate argument
# names are a SyntaxError); the `model_type` class attribute and the module
# logger were bound to mangled names.
import logging

from transformers.configuration_utils import PretrainedConfig


logger = logging.getLogger(__name__)


class snake_case_(PretrainedConfig):
    """BERT configuration extended with weight-pruning hyperparameters
    (pruning_method, mask_init, mask_scale)."""

    model_type = "masked_bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        pruning_method="topK",
        mask_init="constant",
        mask_scale=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Pruning-specific knobs: how scores are selected and how masks start.
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
87
def change_contrast(img: "Image", level: int) -> "Image":
    """Return a copy of ``img`` with its contrast adjusted by ``level``.

    ``level`` ranges roughly from -255 (flat gray) to +255 (maximum contrast);
    0 leaves the image unchanged. Uses the standard contrast factor
    259*(level+255) / (255*(259-level)), pivoting around mid-gray 128.

    Fixes: the function was defined under a mangled name while the demo calls
    ``change_contrast``; PIL is now imported only in the demo so the module
    (and this pure function) work without Pillow installed.
    """
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # Scale each channel's distance from mid-gray by the contrast factor.
        return int(128 + factor * (c - 128))

    # Image.point applies `contrast` to every pixel/band value.
    return img.point(contrast)


if __name__ == "__main__":
    from PIL import Image

    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
87
1
"""Enigma machine emulation (WWII-style rotor cipher).

Fixes: the three rotor-position variables had been collapsed into a single
name (making the stepping and validation nonsense), the alphabet/rotor/
reflector constants were all bound to one throwaway name while being
referenced individually, ``_validator``/``_plugboard`` were defined under a
mangled name, and ``pbstring.replace(" ", "")`` discarded its result so
spaces in a plugboard string were never actually ignored.

Because the reflector is a symmetric derangement, ``enigma`` is an
involution: encrypting a ciphertext with the same settings decrypts it.
"""
from __future__ import annotations

RotorPositionT = tuple[int, int, int]
RotorSelectionT = tuple[str, str, str]


# used alphabet --------------------------
# from string.ascii_uppercase
abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

# -------------------------- default selection --------------------------
# rotors --------------------------
rotor1 = "EGZWVONAHDCLFQMSIPJBYUKXTR"
rotor2 = "FOBHMDKEXQNRAULPGSJVTYICZW"
rotor3 = "ZJXESIUQLHAVRMDOYGTNFWPBKC"
# reflector --------------------------
reflector = {
    "A": "N", "N": "A", "B": "O", "O": "B", "C": "P", "P": "C",
    "D": "Q", "Q": "D", "E": "R", "R": "E", "F": "S", "S": "F",
    "G": "T", "T": "G", "H": "U", "U": "H", "I": "V", "V": "I",
    "J": "W", "W": "J", "K": "X", "X": "K", "L": "Y", "Y": "L",
    "M": "Z", "Z": "M",
}

# -------------------------- extra rotors --------------------------
rotor4 = "RMDJXFUWGISLHVTCQNKYPBEZOA"
rotor5 = "SGLCPQWZHKXAREONTFBVIYJUDM"
rotor6 = "HVSICLTYKQUBXDWAJZOMFGPREN"
rotor7 = "RZWQHFMVDBKICJLNTUXAGYPSOE"
rotor8 = "LFKIJODBEGAMQPXVUHYSTCZRWN"
rotor9 = "KOAEGVDHXPQZMLFTYWJNBRCIUS"


def _validator(
    rotpos: RotorPositionT, rotsel: RotorSelectionT, pb: str
) -> tuple[RotorPositionT, RotorSelectionT, dict[str, str]]:
    """Validate rotor selection/positions and build the plugboard mapping."""
    # Checks if there are 3 unique rotors
    if (unique_rotsel := len(set(rotsel))) < 3:
        raise Exception(f"Please use 3 unique rotors (not {unique_rotsel})")

    # Checks if rotor positions are valid (1-based, within the alphabet)
    rotorpos1, rotorpos2, rotorpos3 = rotpos
    if not 0 < rotorpos1 <= len(abc):
        raise ValueError(f"First rotor position is not within range of 1..26 ({rotorpos1}")
    if not 0 < rotorpos2 <= len(abc):
        raise ValueError(f"Second rotor position is not within range of 1..26 ({rotorpos2})")
    if not 0 < rotorpos3 <= len(abc):
        raise ValueError(f"Third rotor position is not within range of 1..26 ({rotorpos3})")

    # Validates string and returns dict
    pbdict = _plugboard(pb)

    return rotpos, rotsel, pbdict


def _plugboard(pbstring: str) -> dict[str, str]:
    """Turn a string of letter pairs (e.g. ``"AB CD"``) into a symmetric map."""
    if not isinstance(pbstring, str):
        raise TypeError(f"Plugboard setting isn't type string ({type(pbstring)})")
    # BUG FIX: the original discarded the result of replace(), so spaces were
    # counted as symbols; strip them before validating.
    pbstring = pbstring.replace(" ", "")
    if len(pbstring) % 2 != 0:
        raise Exception(f"Odd number of symbols ({len(pbstring)})")
    if pbstring == "":
        return {}

    # Checks if all characters are unique
    seen = set()
    for symbol in pbstring:
        if symbol not in abc:
            raise Exception(f"'{symbol}' not in list of symbols")
        if symbol in seen:
            raise Exception(f"Duplicate symbol ({symbol})")
        seen.add(symbol)

    # Created the dictionary (each pair maps both ways)
    pb = {}
    for j in range(0, len(pbstring) - 1, 2):
        pb[pbstring[j]] = pbstring[j + 1]
        pb[pbstring[j + 1]] = pbstring[j]
    return pb


def enigma(
    text: str,
    rotor_position: RotorPositionT,
    rotor_selection: RotorSelectionT = (rotor1, rotor2, rotor3),
    plugb: str = "",
) -> str:
    """Encrypt/decrypt ``text`` with the given rotor positions, rotors and
    plugboard. Running the output back through with the same settings
    restores the input."""
    text = text.upper()
    rotor_position, rotor_selection, plugboard = _validator(rotor_position, rotor_selection, plugb.upper())

    rotorpos1, rotorpos2, rotorpos3 = rotor_position
    rotor_a, rotor_b, rotor_c = rotor_selection
    # Convert 1-based positions to 0-based offsets.
    rotorpos1 -= 1
    rotorpos2 -= 1
    rotorpos3 -= 1

    result = []

    # encryption/decryption process --------------------------
    for symbol in text:
        if symbol in abc:
            # 1st plugboard --------------------------
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # rotor ra --------------------------
            index = abc.index(symbol) + rotorpos1
            symbol = rotor_a[index % len(abc)]

            # rotor rb --------------------------
            index = abc.index(symbol) + rotorpos2
            symbol = rotor_b[index % len(abc)]

            # rotor rc --------------------------
            index = abc.index(symbol) + rotorpos3
            symbol = rotor_c[index % len(abc)]

            # reflector --------------------------
            # this is the reason you don't need another machine to decipher
            symbol = reflector[symbol]

            # 2nd rotors (inverse pass, in reverse order)
            symbol = abc[rotor_c.index(symbol) - rotorpos3]
            symbol = abc[rotor_b.index(symbol) - rotorpos2]
            symbol = abc[rotor_a.index(symbol) - rotorpos1]

            # 2nd plugboard
            if symbol in plugboard:
                symbol = plugboard[symbol]

            # moves/resets rotor positions (odometer-style cascade)
            rotorpos1 += 1
            if rotorpos1 >= len(abc):
                rotorpos1 = 0
                rotorpos2 += 1
            if rotorpos2 >= len(abc):
                rotorpos2 = 0
                rotorpos3 += 1
            if rotorpos3 >= len(abc):
                rotorpos3 = 0

        # Non-alphabet symbols pass through unchanged.
        result.append(symbol)

    return "".join(result)


if __name__ == "__main__":
    message = "This is my Python script that emulates the Enigma machine from WWII."
    rotor_pos = (1, 1, 1)
    pb = "pictures"
    rotor_sel = (rotor2, rotor4, rotor8)
    en = enigma(message, rotor_pos, rotor_sel, pb)

    print("Encrypted message:", en)
    print("Decrypted message:", enigma(en, rotor_pos, rotor_sel, pb))
87
import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    """Least-recently-used cache.

    A deque keeps keys ordered by recency (most recent at the left) and a set
    gives O(1) membership tests.

    Fixes vs. the previous revision: the class was named ``snake_case_`` while
    the code referenced ``LRUCache._MAX_CAPACITY`` and the demo called
    ``LRUCache(4)`` / ``refer`` / ``display`` (NameError/AttributeError), the
    type parameter ``T`` was never bound, and both public methods shared one
    name so the second silently shadowed the first.
    """

    dq_store: deque[T]  # cache store of keys, most recently used at the left
    key_reference: set[T]  # references of the keys in cache, for O(1) lookup
    _MAX_CAPACITY: int = 10  # maximum capacity of cache (class-wide, see __init__)

    def __init__(self, n: int) -> None:
        """Create a cache holding at most ``n`` keys.

        ``n == 0`` (falsy) means effectively unbounded; negative ``n`` raises
        ``ValueError``.  NOTE: capacity is stored on the class, mirroring the
        original behavior — all instances share the last value set.
        """
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Record a use of key ``x``.

        Evicts the least recently used key (right end) when the cache is full
        and ``x`` is new; otherwise moves ``x`` to the front.
        """
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print cached keys from most to least recently used."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
87
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SPEECH_TO_TEXT_2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/s2t-wav2vec2-large-en-de": (
        "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/config.json"
    ),
    # See all Speech2Text models at https://huggingface.co/models?filter=speech2text2
}


class Speech2Text2Config(PretrainedConfig):
    """Configuration for the Speech2Text2 decoder.

    Fixes vs. the previous revision: the class inherited an undefined name
    (``__A``) instead of the imported ``PretrainedConfig``, and the ``__init__``
    body read parameter names (``vocab_size``, ``d_model``, ...) that the
    obfuscated signature no longer declared, so instantiation raised NameError.
    """

    model_type = "speech_to_text_2"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "decoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=10_000,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=4,
        decoder_layerdrop=0.0,
        use_cache=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=2,
        scale_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_target_positions=1024,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        # Mirrors the decoder depth so generic utilities can read a layer count.
        self.num_hidden_layers = decoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_target_positions = max_target_positions

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
87
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    """Configuration for ConvBERT models.

    Fixes vs. the previous revision: both classes in this module were named
    ``snake_case_`` (the second shadowed the first, making it unreachable),
    inherited the undefined name ``__A`` instead of the imported
    ``PretrainedConfig``/``OnnxConfig``, and the ``__init__`` body read
    parameter names the obfuscated signature no longer declared (NameError).
    """

    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    """ONNX export configuration for ConvBERT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis between batch and sequence.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
87
1
import os
from typing import BinaryIO, Optional, Union

import numpy as np
import pyarrow.parquet as pq

from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader


def get_writer_batch_size(features: Features) -> Optional[int]:
    """Return the recommended parquet row-group size for ``features``.

    Heavy cell types (images, audio, raw binary) get smaller row groups so
    random access stays cheap.  Returns ``None`` when no such type is present.

    Fixes vs. the previous revision: every ``isinstance`` call tested an
    object against itself (``isinstance(x, x)``), which never dispatched to
    the Image/Audio/binary limits; the checks now use the feature classes.
    """
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    # _visit walks the (possibly nested) feature tree — assumed from its import
    # in ..features.features; confirm against that module.
    _visit(features, set_batch_size)

    return None if batch_size is np.inf else batch_size


class ParquetDatasetReader(AbstractDatasetReader):
    """Read one or more parquet files into a (streaming or map-style) Dataset."""

    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths,
            split=split,
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Normalize a bare path into a {split: path} mapping.
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        hash = _PACKAGED_DATASETS_MODULES["parquet"][1]
        self.builder = Parquet(
            cache_dir=cache_dir,
            data_files=path_or_paths,
            features=features,
            hash=hash,
            **kwargs,
        )

    def read(self):
        """Materialize the dataset (iterable when streaming, map-style otherwise)."""
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset


class ParquetDatasetWriter:
    """Write a Dataset to a parquet file or binary buffer."""

    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        **parquet_writer_kwargs,
    ):
        self.dataset = dataset
        self.path_or_buf = path_or_buf
        # Fall back to the feature-aware row-group size when none is given.
        self.batch_size = batch_size or get_writer_batch_size(dataset.features)
        self.parquet_writer_kwargs = parquet_writer_kwargs

    def write(self) -> int:
        """Write the dataset; returns the number of bytes written."""
        batch_size = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with open(self.path_or_buf, "wb+") as buffer:
                written = self._write(file_obj=buffer, batch_size=batch_size, **self.parquet_writer_kwargs)
        else:
            written = self._write(file_obj=self.path_or_buf, batch_size=batch_size, **self.parquet_writer_kwargs)
        return written

    def _write(self, file_obj: BinaryIO, batch_size: int, **parquet_writer_kwargs) -> int:
        """Stream the underlying Arrow table into ``file_obj`` in row-group batches."""
        written = 0
        # path_or_buf must not be forwarded to pq.ParquetWriter — it already gets file_obj.
        _ = parquet_writer_kwargs.pop("path_or_buf", None)
        schema = self.dataset.features.arrow_schema

        writer = pq.ParquetWriter(file_obj, schema=schema, **parquet_writer_kwargs)

        for offset in logging.tqdm(
            range(0, len(self.dataset), batch_size),
            unit="ba",
            disable=not logging.is_progress_bar_enabled(),
            desc="Creating parquet from Arrow format",
        ):
            batch = query_table(
                table=self.dataset._data,
                key=slice(offset, offset + batch_size),
                indices=self.dataset._indices if self.dataset._indices is not None else None,
            )
            writer.write_table(batch)
            written += batch.nbytes
        writer.close()
        return written
87
import argparse

import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    """Convert a TensorFlow BERT checkpoint to a PyTorch ``BertForPreTraining`` state dict.

    Fix vs. the previous revision: the function was defined under the name
    ``lowercase_`` while the CLI entry point called
    ``convert_tf_checkpoint_to_pytorch`` — a guaranteed NameError.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
87
1
import os
from datetime import datetime as dt

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    """Close or nag stale issues on huggingface/transformers.

    Fixes vs. the previous revision: the function was named ``lowercase_`` while
    the entry point called ``main()``; the sort key lambda referenced an
    undefined name ``i``; and ``reverse=`` was bound to an undefined name
    instead of ``True``.
    """
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        # Newest comment first so comments[0] is the latest activity.
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
87
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch

import pyarrow as pa
import pytest
import requests
from packaging import version

from datasets import config


if config.PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


def parse_flag_from_env(key, default=False):
    """Read boolean flag ``key`` from the environment, falling back to ``default``."""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


# These module-level flags are read by the slow/local/packaged/remote decorators
# below; in the previous revision they were all assigned to one throwaway name,
# leaving `_run_slow_tests` etc. undefined.
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None
    or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)


def require_faiss(test_case):
    """Skip ``test_case`` unless faiss is installed."""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    """Skip ``test_case`` unless regex is installed."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    """Skip ``test_case`` unless elasticsearch is installed."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    """Skip ``test_case`` unless sqlalchemy is installed."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    """Skip ``test_case`` unless PyTorch is available."""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    """Skip ``test_case`` unless TensorFlow is available."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    """Skip ``test_case`` unless JAX is available."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    """Skip ``test_case`` unless Pillow is available."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    """Skip ``test_case`` unless transformers is installed."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    """Skip ``test_case`` unless tiktoken is installed."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    """Skip ``test_case`` unless spacy is installed."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    """Decorator factory: skip unless spacy and the given spacy model are available."""

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    """Skip ``test_case`` unless pyspark is installed."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    """Skip ``test_case`` unless joblibspark is installed."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    """Skip ``test_case`` unless RUN_SLOW is enabled."""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    """Skip ``test_case`` unless RUN_LOCAL is enabled."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    """Skip ``test_case`` unless RUN_PACKAGED is enabled."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    """Skip ``test_case`` unless RUN_REMOTE is enabled."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    """Apply every decorator in ``decorators`` to each test_* method of a class."""

    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate


class RequestWouldHangIndefinitelyError(Exception):
    # NOTE(review): the base class was obfuscated in the previous revision;
    # Exception assumed — confirm against upstream.
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """Simulate an offline environment for the duration of the context."""
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")


@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    """Run the body of the context inside a fresh temporary working directory."""
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    """Assert that Arrow-allocated memory grows inside the context."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    """Assert that Arrow-allocated memory does not grow inside the context."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]): return deepcopy(_lowerCamelCase).integers(0 , 100 , 10).tolist() == deepcopy(_lowerCamelCase).integers(0 , 100 , 10).tolist() def lowercase_ ( _lowerCamelCase : str): import decorator from requests.exceptions import HTTPError def _wrapper(_lowerCamelCase : str , *_lowerCamelCase : Dict , **_lowerCamelCase : Dict): try: return func(*_lowerCamelCase , **_lowerCamelCase) except HTTPError as err: if str(_lowerCamelCase).startswith("500") or str(_lowerCamelCase).startswith("502"): pytest.xfail(str(_lowerCamelCase)) raise err return decorator.decorator(_wrapper , _lowerCamelCase) class snake_case_ : def __init__( self : int , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : List[str] ) -> List[str]: lowercase__ : Tuple = returncode lowercase__ : int = stdout lowercase__ : Union[str, Any] = stderr async def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Dict): while True: lowercase__ : Optional[int] = await stream.readline() if line: callback(_lowerCamelCase) else: break async def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : int=None , _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : Tuple=False): if echo: print("\nRunning: " , " ".join(_lowerCamelCase)) lowercase__ : Optional[int] = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) lowercase__ : str = [] lowercase__ : List[str] = [] def tee(_lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int]=""): lowercase__ : Optional[int] = line.decode("utf-8").rstrip() sink.append(_lowerCamelCase) if not quiet: print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label="stdout:")), _read_stream(p.stderr , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label="stderr:")), ] , timeout=_lowerCamelCase , ) return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str]=None , _lowerCamelCase : Dict=None , _lowerCamelCase : int=180 , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Optional[Any]=True): lowercase__ : Any = asyncio.get_event_loop() lowercase__ : Tuple = loop.run_until_complete( _stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase)) lowercase__ : int = " ".join(_lowerCamelCase) if result.returncode > 0: lowercase__ : Any = "\n".join(result.stderr) raise RuntimeError( f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' f'''The combined stderr from workers follows:\n{stderr}''') # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f'''\'{cmd_str}\' produced no output.''') return result def lowercase_ ( ): lowercase__ : List[str] = os.environ.get("PYTEST_XDIST_WORKER" , "gw0") lowercase__ : str = re.sub(R"^gw" , "" , _lowerCamelCase , 
0 , re.M) return int(_lowerCamelCase) def lowercase_ ( ): lowercase__ : Union[str, Any] = 2_9500 lowercase__ : Optional[int] = pytest_xdist_worker_id() return port + uniq_delta
87
1
def perfect_cube(n: int) -> bool:
    """Return True if ``n`` is the cube of an integer.

    Fixes vs. the previous revision: the function was named ``lowercase_``
    while the demo called ``perfect_cube`` (NameError); the float cube root
    made ``perfect_cube(27)`` return False (``27 ** (1 / 3)`` is
    ``3.0000000000000004``); and a negative ``n`` produced a complex power.
    """
    if n < 0:
        # n is a perfect cube iff -n is (cube roots preserve sign).
        n = -n
    root = round(n ** (1 / 3))
    # The float root can be off by one in either direction, so probe neighbors.
    return any((root + delta) ** 3 == n for delta in (-1, 0, 1))


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
87
import argparse
import json
from collections import OrderedDict

import torch
from huggingface_hub import cached_download, hf_hub_url

from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification


def embeddings(idx):
    """Return (HF key, original key) rename pairs for the patch embedding of stage `idx`."""
    src = f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings"
    dst = f"stage{idx}.patch_embed"
    return [
        (f"{src}.projection.weight", f"{dst}.proj.weight"),
        (f"{src}.projection.bias", f"{dst}.proj.bias"),
        (f"{src}.normalization.weight", f"{dst}.norm.weight"),
        (f"{src}.normalization.bias", f"{dst}.norm.bias"),
    ]


def attention(idx, cnt):
    """Return (HF key, original key) rename pairs for transformer block `cnt` of stage `idx`.

    Covers the q/k/v convolutional projections (conv + batch norm), the q/k/v
    linear projections, the attention output projection, the MLP, and the two
    layer norms, in the same order as the original hand-written list.
    """
    hf = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig = f"stage{idx}.blocks.{cnt}"
    pairs = []
    # Depthwise convolutional projections: one conv weight plus the full set of
    # batch-norm parameters/buffers per projection.
    for long_name, short_name in (("query", "q"), ("key", "k"), ("value", "v")):
        conv_hf = f"{hf}.attention.attention.convolution_projection_{long_name}.convolution_projection"
        conv_orig = f"{orig}.attn.conv_proj_{short_name}"
        pairs.append((f"{conv_hf}.convolution.weight", f"{conv_orig}.conv.weight"))
        for param in ("weight", "bias", "running_mean", "running_var", "num_batches_tracked"):
            pairs.append((f"{conv_hf}.normalization.{param}", f"{conv_orig}.bn.{param}"))
    # Linear q/k/v projections.
    for long_name, short_name in (("query", "q"), ("key", "k"), ("value", "v")):
        for param in ("weight", "bias"):
            pairs.append(
                (
                    f"{hf}.attention.attention.projection_{long_name}.{param}",
                    f"{orig}.attn.proj_{short_name}.{param}",
                )
            )
    # Attention output projection, MLP and layer norms.
    for hf_tail, orig_tail in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for param in ("weight", "bias"):
            pairs.append((f"{hf}.{hf_tail}.{param}", f"{orig}.{orig_tail}.{param}"))
    return pairs


def cls_token(idx):
    """Return the rename pair for the cls token of stage `idx`.

    NOTE(review): the original checkpoint stores the cls token under
    "stage2" regardless of `idx` (only the last stage has one).
    """
    return [(f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token")]


def final():
    """Return rename pairs for the final layer norm and classification head."""
    return [
        ("layernorm.weight", "norm.weight"),
        ("layernorm.bias", "norm.bias"),
        ("classifier.weight", "head.weight"),
        ("classifier.bias", "head.bias"),
    ]


def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Copy/rename the weights of an original CvT checkpoint into a HF CvtForImageClassification model.

    Args:
        cvt_model: name of the CvT variant (e.g. "cvt-13", "cvt-21", "cvt-w24");
            the stage depths are parsed out of this name.
        image_size: input image size for the image processor.
        cvt_file_name: path to the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    with open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r") as f:
        id2label = json.load(f)
    # JSON object keys are strings; the config expects integer class ids.
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    # assumes the processor resizes on its shortest edge — TODO confirm key name
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)


# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=R"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
87
1
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch

import pyarrow as pa
import pytest
import requests
from packaging import version

from datasets import config

if config.PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


def parse_flag_from_env(key, default=False):
    """Read boolean flag `key` from the environment, falling back to `default` when unset.

    Raises:
        ValueError: if the variable is set to something `strtobool` cannot parse.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None
    or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)


def require_faiss(test_case):
    """Skip `test_case` unless faiss is installed."""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    """Skip `test_case` unless regex is installed."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    """Skip `test_case` unless elasticsearch is installed."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    """Skip `test_case` unless sqlalchemy is installed."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    """Skip `test_case` unless PyTorch is available."""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    """Skip `test_case` unless TensorFlow is available."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    """Skip `test_case` unless JAX is available."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    """Skip `test_case` unless Pillow is available."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    """Skip `test_case` unless transformers is installed."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    """Skip `test_case` unless tiktoken is installed."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    """Skip `test_case` unless spacy is installed."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    """Skip the decorated test unless spacy and the given spacy `model` are available."""

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    """Skip `test_case` unless pyspark is installed."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    """Skip `test_case` unless joblibspark is installed."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    """Skip `test_case` unless slow tests are enabled (RUN_SLOW)."""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    """Skip `test_case` unless local tests are enabled (RUN_LOCAL)."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    """Skip `test_case` unless packaged tests are enabled (RUN_PACKAGED)."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    """Skip `test_case` unless remote tests are enabled (RUN_REMOTE)."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    """Apply every decorator in `decorators` to all test methods of the decorated class."""

    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate


class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """Simulate an offline environment for the duration of the context.

    Depending on `mode`, either all requests fail immediately, all requests
    time out after `timeout` seconds, or HF_DATASETS_OFFLINE is patched to 1.
    """
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(session, method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")


@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    """Run the body of the `with` block inside a fresh temporary working directory."""
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    """Assert that Arrow-allocated memory grows across the `with` block."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    """Assert that Arrow-allocated memory does not grow across the `with` block."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    """Return True if two NumPy bit generators would produce the same stream.

    Compares 10 draws from deep copies, so neither generator's state is advanced.
    """
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    """Decorator that xfails the test when the wrapped call raises an HTTP 500/502 (flaky server)."""
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)


class _RunOutput:
    """Result of an async subprocess run: return code plus captured stdout/stderr lines."""

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    """Forward each line of `stream` to `callback` until EOF."""
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    """Run `cmd` asynchronously, teeing its stdout/stderr to ours while capturing both."""
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """Run `cmd` to completion in a fresh event loop and return its captured output.

    Raises:
        RuntimeError: if the command exits non-zero, or produces no output at all.
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    """Return the numeric pytest-xdist worker id (e.g. 0 for "gw0", also 0 when not distributed)."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Return a torch.distributed master port unique to this xdist worker (base 29500 + worker id)."""
    port = 2_9500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
87
# Lazy-import scaffold for the Electra model family: heavy framework-specific
# modules are only imported when actually accessed.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

# Always-importable parts: config and the slow tokenizer.
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules are imported on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
1
from __future__ import annotations


def lowercase_(_lowerCamelCase: list) -> float:
    """Return the arithmetic mean of the numbers in the given list.

    >>> lowercase_([1, 2, 3])
    2.0

    Raises:
        ValueError: if the list is empty (the mean is undefined).
    """
    count = len(_lowerCamelCase)
    if count == 0:
        raise ValueError("List is empty")
    return sum(_lowerCamelCase) / count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
87
import json
import os
import unittest

from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


# NOTE(review): identifier mangling has damaged this file: the mixin base is
# the undefined name `__A` (presumably TokenizerTesterMixin), every class
# attribute is re-assigned to the same name `__A`, every method is named
# `__UpperCamelCase`, typing names (Union, Any, ...) are used without import,
# and locals are bound to `lowercase__` while later statements read the
# original names (`batch`, `targets`, `tokenizer_r`, ...).  The comments below
# record the apparent intent; the names must be restored before this can run.
@require_tokenizers
class snake_case_(__A, unittest.TestCase):
    """Tokenizer test-suite for LED (slow and fast BPE tokenizers)."""

    # Tokenizer classes under test; the boolean presumably enables the fast path.
    __A: Union[str, Any] = LEDTokenizer
    __A: Union[str, Any] = LEDTokenizerFast
    __A: Optional[Any] = True

    def __UpperCamelCase(self: List[str]) -> Union[str, Any]:
        # setUp: write a tiny BPE vocab + merges fixture into tmpdirname so the
        # tokenizers can be instantiated without a network download.
        super().setUp()
        lowercase__: List[str] = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        # token -> id mapping for the vocab file
        lowercase__: Optional[int] = dict(zip(lowercase_, range(len(lowercase_))))
        lowercase__: str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        lowercase__: Tuple = {"unk_token": "<unk>"}
        lowercase__: Optional[Any] = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        lowercase__: Dict = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(lowercase_) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(lowercase_))

    def __UpperCamelCase(self: int, **lowercase_: str) -> List[Any]:
        # Build a slow tokenizer from the on-disk fixture.
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **lowercase_)

    def __UpperCamelCase(self: List[Any], **lowercase_: Any) -> List[Any]:
        # Build a fast (rust) tokenizer from the on-disk fixture.
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **lowercase_)

    def __UpperCamelCase(self: str, lowercase_: Any) -> Tuple:
        # Input/output text pair consumed by the common tokenizer tester.
        return "lower newer", "lower newer"

    @cached_property
    def __UpperCamelCase(self: Tuple) -> Optional[Any]:
        # Default slow tokenizer (downloads a checkpoint; cached afterwards).
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def __UpperCamelCase(self: Tuple) -> int:
        # Default fast tokenizer (downloads a checkpoint; cached afterwards).
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def __UpperCamelCase(self: int) -> List[Any]:
        # Encoding two sentences should produce a (2, 9) BatchEncoding whose
        # first row matches the recorded token ids.
        lowercase__: Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        lowercase__: str = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__: Dict = tokenizer(lowercase_, max_length=len(lowercase_), padding=lowercase_, return_tensors="pt")
            self.assertIsInstance(lowercase_, lowercase_)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            lowercase__: Union[str, Any] = batch.input_ids.tolist()[0]
            self.assertListEqual(lowercase_, lowercase_)

    @require_torch
    def __UpperCamelCase(self: List[str]) -> Tuple:
        # Plain encoding exposes input_ids/attention_mask but no target-side keys.
        lowercase__: Dict = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__: Optional[int] = tokenizer(lowercase_, padding=lowercase_, return_tensors="pt")
            self.assertIn("input_ids", lowercase_)
            self.assertIn("attention_mask", lowercase_)
            self.assertNotIn("labels", lowercase_)
            self.assertNotIn("decoder_attention_mask", lowercase_)

    @require_torch
    def __UpperCamelCase(self: Optional[Any]) -> Any:
        # Targets padded to max_length=32.
        lowercase__: Dict = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__: Dict = tokenizer(text_target=lowercase_, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def __UpperCamelCase(self: Optional[int]) -> Tuple:
        # Over-long input gets truncated; the asserted shape is (2, 5122).
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__: int = tokenizer(
                ["I am a small frog" * 10_24, "I am a small frog"],
                padding=lowercase_,
                truncation=lowercase_,
                return_tensors="pt",
            )
            self.assertIsInstance(lowercase_, lowercase_)
            self.assertEqual(batch.input_ids.shape, (2, 51_22))

    @require_torch
    def __UpperCamelCase(self: List[str]) -> Any:
        # Source and target encodings are both wrapped in BOS ... EOS.
        lowercase__: Union[str, Any] = ["A long paragraph for summarization."]
        lowercase__: List[Any] = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__: List[Any] = tokenizer(lowercase_, return_tensors="pt")
            lowercase__: Dict = tokenizer(text_target=lowercase_, return_tensors="pt")
            lowercase__: Optional[int] = inputs["input_ids"]
            lowercase__: str = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def __UpperCamelCase(self: Union[str, Any]) -> Dict:
        # tokenizer.pad must carry the custom global_attention_mask through,
        # padding the shorter row with 0s.
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            lowercase__: int = ["Summary of the text.", "Another summary."]
            lowercase__: List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            lowercase__: Tuple = tokenizer(lowercase_, padding=lowercase_)
            lowercase__: int = [[0] * len(lowercase_) for x in encoded_output["input_ids"]]
            lowercase__: Any = tokenizer.pad(lowercase_)
            self.assertSequenceEqual(outputs["global_attention_mask"], lowercase_)

    def __UpperCamelCase(self: int) -> Union[str, Any]:
        # Intentionally a no-op override in this suite.
        pass

    def __UpperCamelCase(self: int) -> Optional[Any]:
        # Slow and fast tokenizers must agree on special/mask-token handling.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})'''):
                lowercase__: List[Any] = self.rust_tokenizer_class.from_pretrained(lowercase_, **lowercase_)
                lowercase__: List[str] = self.tokenizer_class.from_pretrained(lowercase_, **lowercase_)
                lowercase__: List[Any] = "A, <mask> AllenNLP sentence."
                lowercase__: Tuple = tokenizer_r.encode_plus(lowercase_, add_special_tokens=lowercase_, return_token_type_ids=lowercase_)
                lowercase__: List[str] = tokenizer_p.encode_plus(lowercase_, add_special_tokens=lowercase_, return_token_type_ids=lowercase_)
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )
                lowercase__: Any = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                lowercase__: Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2])
                self.assertSequenceEqual(
                    lowercase_, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    lowercase_, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
87
1
import gc
import inspect
import unittest

import torch
from parameterized import parameterized

from diffusers import PriorTransformer
from diffusers.utils import floats_tensor, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from .test_modeling_common import ModelTesterMixin

enable_full_determinism()


# NOTE(review): identifier mangling has damaged this file: the mixin base is
# the undefined name `__A` (presumably ModelTesterMixin), both class attributes
# are assigned to `__A`, all methods are named `__UpperCamelCase`, typing names
# are used without import, and locals are bound to `lowercase__` while later
# statements read the original names (`model`, `loading_info`, `batch_size`,
# ...).  Comments record the apparent intent; names must be restored to run.
class snake_case_(__A, unittest.TestCase):
    """Unit tests for the PriorTransformer model."""

    __A: Any = PriorTransformer
    __A: Union[str, Any] = "hidden_states"

    @property
    def __UpperCamelCase(self: Dict) -> str:
        # dummy_input: random hidden states / projection / encoder states,
        # batch=4, embedding_dim=8, num_embeddings=7.
        lowercase__: Optional[Any] = 4
        lowercase__: Any = 8
        lowercase__: str = 7
        lowercase__: Dict = floats_tensor((batch_size, embedding_dim)).to(lowercase_)
        lowercase__: List[str] = floats_tensor((batch_size, embedding_dim)).to(lowercase_)
        lowercase__: Dict = floats_tensor((batch_size, num_embeddings, embedding_dim)).to(lowercase_)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def __UpperCamelCase(self: Dict, lowercase_: Optional[int] = 0) -> int:
        # Seeded variant of the dummy input so output slices are reproducible.
        torch.manual_seed(lowercase_)
        lowercase__: Tuple = 4
        lowercase__: Optional[int] = 8
        lowercase__: int = 7
        lowercase__: Union[str, Any] = torch.randn((batch_size, embedding_dim)).to(lowercase_)
        lowercase__: Any = torch.randn((batch_size, embedding_dim)).to(lowercase_)
        lowercase__: List[Any] = torch.randn((batch_size, num_embeddings, embedding_dim)).to(lowercase_)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    @property
    def __UpperCamelCase(self: List[str]) -> Optional[int]:
        # Expected input shape (num_embeddings, embedding_dim).
        return (4, 8)

    @property
    def __UpperCamelCase(self: int) -> int:
        # Expected output shape.
        return (4, 8)

    def __UpperCamelCase(self: List[Any]) -> Optional[Any]:
        # Init kwargs + matching dummy inputs for the common model tester.
        lowercase__: Union[str, Any] = {
            "num_attention_heads": 2,
            "attention_head_dim": 4,
            "num_layers": 2,
            "embedding_dim": 8,
            "num_embeddings": 7,
            "additional_embeddings": 4,
        }
        lowercase__: Tuple = self.dummy_input
        return init_dict, inputs_dict

    def __UpperCamelCase(self: Optional[int]) -> List[str]:
        # from_pretrained with output_loading_info: no missing keys, forward runs.
        lowercase__, lowercase__: Any = PriorTransformer.from_pretrained(
            "hf-internal-testing/prior-dummy", output_loading_info=lowercase_
        )
        self.assertIsNotNone(lowercase_)
        self.assertEqual(len(loading_info["missing_keys"]), 0)
        model.to(lowercase_)
        lowercase__: Dict = model(**self.dummy_input)[0]
        assert hidden_states is not None, "Make sure output is not None"

    def __UpperCamelCase(self: Optional[Any]) -> Union[str, Any]:
        # forward() signature must start with (hidden_states, timestep, ...).
        lowercase__, lowercase__: str = self.prepare_init_args_and_inputs_for_common()
        lowercase__: Any = self.model_class(**lowercase_)
        lowercase__: Tuple = inspect.signature(model.forward)
        # signature.parameters is an OrderedDict => so arg_names order is deterministic
        lowercase__: Optional[int] = [*signature.parameters.keys()]
        lowercase__: Optional[Any] = ["hidden_states", "timestep"]
        self.assertListEqual(arg_names[:2], lowercase_)

    def __UpperCamelCase(self: Optional[int]) -> Optional[int]:
        # Seeded forward pass must match a recorded output slice.
        lowercase__: str = PriorTransformer.from_pretrained("hf-internal-testing/prior-dummy")
        lowercase__: Tuple = model.to(lowercase_)
        if hasattr(lowercase_, "set_default_attn_processor"):
            model.set_default_attn_processor()
        lowercase__: Tuple = self.get_dummy_seed_input()
        with torch.no_grad():
            lowercase__: Union[str, Any] = model(**lowercase_)[0]
        lowercase__: int = output[0, :5].flatten().cpu()
        print(lowercase_)
        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        lowercase__: Dict = torch.tensor([-1.34_36, -0.28_70, 0.75_38, 0.43_68, -0.02_39])
        self.assertTrue(torch_all_close(lowercase_, lowercase_, rtol=1E-2))


@slow
class snake_case_(unittest.TestCase):
    """Slow integration tests against the Kandinsky 2.1 prior checkpoint."""

    def __UpperCamelCase(self: Optional[Any], lowercase_: Any = 1, lowercase_: Any = 7_68, lowercase_: Any = 77, lowercase_: Tuple = 0) -> int:
        # NOTE(review): every parameter here is named `lowercase_` — a
        # SyntaxError (duplicate argument names) left by the mangling; the body
        # reads the original names (batch_size, embedding_dim, num_embeddings,
        # seed).
        torch.manual_seed(lowercase_)
        lowercase__: int = batch_size
        lowercase__: Any = embedding_dim
        lowercase__: Dict = num_embeddings
        lowercase__: str = torch.randn((batch_size, embedding_dim)).to(lowercase_)
        lowercase__: Optional[int] = torch.randn((batch_size, embedding_dim)).to(lowercase_)
        lowercase__: str = torch.randn((batch_size, num_embeddings, embedding_dim)).to(lowercase_)
        return {
            "hidden_states": hidden_states,
            "timestep": 2,
            "proj_embedding": proj_embedding,
            "encoder_hidden_states": encoder_hidden_states,
        }

    def __UpperCamelCase(self: Any) -> Tuple:
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @parameterized.expand(
        [
            # fmt: off
            [13, [-0.58_61, 0.12_83, -0.09_31, 0.08_82, 0.44_76, 0.13_29, -0.04_98, 0.06_40]],
            [37, [-0.49_13, 0.01_10, -0.04_83, 0.05_41, 0.49_54, -0.01_70, 0.03_54, 0.16_51]],
            # fmt: on
        ]
    )
    def __UpperCamelCase(self: Dict, lowercase_: Dict, lowercase_: Optional[int]) -> List[Any]:
        # Seeded output slices from the real checkpoint must match the recorded
        # values above.  NOTE(review): duplicate parameter names again.
        lowercase__: Tuple = PriorTransformer.from_pretrained("kandinsky-community/kandinsky-2-1-prior", subfolder="prior")
        model.to(lowercase_)
        lowercase__: Any = self.get_dummy_seed_input(seed=lowercase_)
        with torch.no_grad():
            lowercase__: int = model(**lowercase_)[0]
        assert list(sample.shape) == [1, 7_68]
        lowercase__: List[Any] = sample[0, :8].flatten().cpu()
        print(lowercase_)
        lowercase__: List[Any] = torch.tensor(lowercase_)
        assert torch_all_close(lowercase_, lowercase_, atol=1E-3)
87
import math
from typing import Any, Callable, List, Optional, Tuple, Union

import numpy as np
import torch

from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor

if is_onnx_available():
    from ..onnx_utils import OnnxRuntimeModel

from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder

# NOTE(review): both module-level constants are bound to the same name, so the
# second assignment clobbers the logger; the code below reads `logger` and
# `TARGET_FEATURE_LENGTH`, which are therefore undefined.  These were
# presumably `logger = logging.get_logger(__name__)` and
# `TARGET_FEATURE_LENGTH = 256` before mangling.
UpperCamelCase = logging.get_logger(__name__)  # pylint: disable=invalid-name
UpperCamelCase = 256


# NOTE(review): mangling also applies inside the class — parameters are all
# named `lowercase_` (duplicate argument names are a SyntaxError), locals are
# bound to `lowercase__` while later statements read the original names
# (`features`, `zero_one`, `min_out`, `max_out`, `timesteps`, ...), and the
# base class is written as the undefined `__A` (presumably DiffusionPipeline).
class snake_case_(__A):
    """Spectrogram-diffusion audio pipeline: encodes note tokens plus a
    continuous context, denoises a mel spectrogram chunk-by-chunk with a DDPM
    scheduler, and optionally vocodes the result with MelGAN."""

    __A: str = ["melgan"]

    def __init__(
        self: str,
        lowercase_: SpectrogramNotesEncoder,
        lowercase_: SpectrogramContEncoder,
        lowercase_: TaFilmDecoder,
        lowercase_: DDPMScheduler,
        lowercase_: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()
        # From MELGAN
        lowercase__: List[Any] = math.log(1E-5)  # Matches MelGAN training.
        lowercase__: str = 4.0  # Largest value for most examples
        lowercase__: Any = 1_28
        self.register_modules(
            notes_encoder=lowercase_,
            continuous_encoder=lowercase_,
            decoder=lowercase_,
            scheduler=lowercase_,
            melgan=lowercase_,
        )

    def __UpperCamelCase(self: Union[str, Any], lowercase_: Union[str, Any], lowercase_: List[str] = (-1.0, 1.0), lowercase_: Dict = False) -> Optional[Any]:
        # scale_features: map features from [min_value, max_value] into
        # `output_range`, optionally clipping first.
        lowercase__, lowercase__: int = output_range
        if clip:
            lowercase__: Optional[Any] = torch.clip(lowercase_, self.min_value, self.max_value)
        # Scale to [0, 1].
        lowercase__: List[str] = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def __UpperCamelCase(self: Optional[int], lowercase_: List[str], lowercase_: List[str] = (-1.0, 1.0), lowercase_: List[Any] = False) -> Union[str, Any]:
        # scale_to_features: inverse of the above — map model outputs from
        # `input_range` back to [min_value, max_value].
        lowercase__, lowercase__: Tuple = input_range
        lowercase__: Optional[Any] = torch.clip(lowercase_, lowercase_, lowercase_) if clip else outputs
        # Scale to [0, 1].
        lowercase__: Union[str, Any] = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def __UpperCamelCase(self: List[str], lowercase_: Any, lowercase_: Optional[Any], lowercase_: Tuple) -> List[str]:
        # encode: run both encoders; token positions > 0 form the notes mask.
        lowercase__: Optional[Any] = input_tokens > 0
        lowercase__, lowercase__: int = self.notes_encoder(
            encoder_input_tokens=lowercase_, encoder_inputs_mask=lowercase_
        )
        lowercase__, lowercase__: List[Any] = self.continuous_encoder(
            encoder_inputs=lowercase_, encoder_inputs_mask=lowercase_
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def __UpperCamelCase(self: Optional[Any], lowercase_: Optional[int], lowercase_: Optional[int], lowercase_: str) -> Tuple:
        # decode: one denoising step — normalize `noise_time` to a per-sample
        # timestep tensor, then run the FiLM decoder.
        lowercase__: Union[str, Any] = noise_time
        if not torch.is_tensor(lowercase_):
            lowercase__: Optional[Any] = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(lowercase_) and len(timesteps.shape) == 0:
            lowercase__: Optional[Any] = timesteps[None].to(input_tokens.device)
        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        lowercase__: int = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)
        lowercase__: str = self.decoder(
            encodings_and_masks=lowercase_, decoder_input_tokens=lowercase_, decoder_noise_time=lowercase_
        )
        return logits

    @torch.no_grad()
    def __call__(
        self: List[str],
        lowercase_: List[List[int]],
        lowercase_: Optional[torch.Generator] = None,
        lowercase_: int = 1_00,
        lowercase_: bool = True,
        lowercase_: str = "numpy",
        lowercase_: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        lowercase_: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        # Generate audio: iterate over note-token chunks, conditioning each
        # chunk's denoising loop on the previous chunk's predicted mel.
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(lowercase_, lowercase_) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(lowercase_)}.'''
            )
        lowercase__: str = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.floataa)
        lowercase__: Optional[int] = np.zeros([1, 0, self.n_dims], np.floataa)
        lowercase__: str = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=lowercase_, device=self.device)
        for i, encoder_input_tokens in enumerate(lowercase_):
            if i == 0:
                lowercase__: Union[str, Any] = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                lowercase__: List[str] = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=lowercase_, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                lowercase__: str = ones
            lowercase__: str = self.scale_features(lowercase_, output_range=[-1.0, 1.0], clip=lowercase_)
            lowercase__: str = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=lowercase_,
                continuous_mask=lowercase_,
            )
            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            lowercase__: List[str] = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=lowercase_,
                device=self.device,
                dtype=self.decoder.dtype,
            )
            # set step values
            self.scheduler.set_timesteps(lowercase_)
            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                lowercase__: Optional[int] = self.decode(
                    encodings_and_masks=lowercase_,
                    input_tokens=lowercase_,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )
                # Compute previous output: x_t -> x_t-1
                lowercase__: Optional[Any] = self.scheduler.step(lowercase_, lowercase_, lowercase_, generator=lowercase_).prev_sample
            lowercase__: Tuple = self.scale_to_features(lowercase_, input_range=[-1.0, 1.0])
            lowercase__: List[str] = mel[:1]
            lowercase__: Optional[int] = mel.cpu().float().numpy()
            lowercase__: str = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)
            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(lowercase_, lowercase_)
            logger.info("Generated segment", lowercase_)
        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )
        if output_type == "numpy":
            lowercase__: Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa))
        else:
            lowercase__: Dict = full_pred_mel
        if not return_dict:
            return (output,)
        return AudioPipelineOutput(audios=lowercase_)
87
1
import fire

from transformers import AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer


def lowercase_(config_name: str, save_dir: str, **config_kwargs):
    """Create a randomly initialized seq2seq model from a config and save it.

    Loads the config for ``config_name`` (extra ``config_kwargs`` override
    config fields), builds an *untrained* model from that config, and saves the
    model plus the matching tokenizer into ``save_dir``.

    Fixes vs. the original: the two positional parameters shared one name
    (a SyntaxError); ``from_config`` was given the config *name* instead of
    the loaded config object; ``model`` was read but never bound; and
    ``fire.Fire`` was passed the undefined name
    ``save_randomly_initialized_version`` (now provided as an alias below).

    :param config_name: model identifier or path understood by ``from_pretrained``
    :param save_dir: output directory for model and tokenizer files
    :return: the randomly initialized model
    """
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeqaSeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model


# Public/CLI name expected by the fire entry point.
save_randomly_initialized_version = lowercase_

if __name__ == "__main__":
    fire.Fire(save_randomly_initialized_version)
87
import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


# NOTE(review): identifier mangling has damaged this file: all methods are
# named `__UpperCamelCase`, typing names are used without import, and locals
# are bound to `lowercase__` while later statements read the original names
# (`dataset`, `audio`, `audio_classifier`, `output`).  Comments record the
# apparent intent; the names must be restored before this can run.
@is_pipeline_test
@require_torch
class snake_case_(unittest.TestCase):
    """Pipeline tests for zero-shot audio classification (CLAP)."""

    @require_torch
    def __UpperCamelCase(self: Optional[int]) -> List[Any]:
        # Tiny test checkpoint: scores are near-uniform but label order is fixed.
        lowercase__: Union[str, Any] = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        lowercase__: List[str] = load_dataset("ashraq/esc50")
        lowercase__: List[Any] = dataset["train"]["audio"][-1]["array"]
        lowercase__: Dict = audio_classifier(lowercase_, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(lowercase_),
            [{"score": 0.5_01, "label": "Sound of a dog"}, {"score": 0.4_99, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def __UpperCamelCase(self: str) -> Optional[int]:
        pass

    @slow
    @require_torch
    def __UpperCamelCase(self: List[str]) -> int:
        # Full checkpoint: single input, list input, and batched (batch_size=5)
        # input must all classify the clip as a dog with near-certainty.
        lowercase__: Tuple = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        lowercase__: Union[str, Any] = load_dataset("ashraq/esc50")
        lowercase__: Tuple = dataset["train"]["audio"][-1]["array"]
        lowercase__: List[Any] = audio_classifier(lowercase_, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(lowercase_),
            [
                {"score": 0.9_99, "label": "Sound of a dog"},
                {"score": 0.0_01, "label": "Sound of vaccum cleaner"},
            ],
        )
        lowercase__: int = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(lowercase_),
            [
                [
                    {"score": 0.9_99, "label": "Sound of a dog"},
                    {"score": 0.0_01, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        lowercase__: Tuple = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(lowercase_),
            [
                [
                    {"score": 0.9_99, "label": "Sound of a dog"},
                    {"score": 0.0_01, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def __UpperCamelCase(self: Union[str, Any]) -> Dict:
        pass
87
1
import itertools
import os
import random
import tempfile
import unittest

import numpy as np
from datasets import load_dataset

from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin

if is_speech_available():
    from transformers import WhisperFeatureExtractor

if is_torch_available():
    import torch

# NOTE(review): this module-level RNG was presumably named `global_rng` — the
# helper below reads that (now undefined) name.
UpperCamelCase = random.Random()


def lowercase_(_lowerCamelCase: int, _lowerCamelCase: Optional[Any] = 1.0, _lowerCamelCase: int = None, _lowerCamelCase: int = None):
    # floats_list: build a shape[0] x shape[1] nested list of random floats
    # scaled by `scale`, using `rng` (or the module RNG when None).
    # NOTE(review): all four parameters share the name `_lowerCamelCase`
    # (duplicate argument names are a SyntaxError) and the body reads the
    # original names `rng`, `shape`, `scale`, `values` — mangled; restore
    # before running.
    if rng is None:
        lowercase__: Optional[int] = global_rng
    lowercase__: Tuple = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values


# NOTE(review): same mangling pattern throughout both classes below — methods
# all named `__UpperCamelCase`, locals bound to `lowercase__` but read under
# their original names, typing names used without import, and the mixin base
# of the second class written as the undefined `__A`.
@require_torch
@require_torchaudio
class snake_case_(unittest.TestCase):
    """Config holder that fabricates speech inputs for the extractor tests."""

    def __init__(self: Optional[Any], lowercase_: Any, lowercase_: Any = 7, lowercase_: List[str] = 4_00, lowercase_: int = 20_00, lowercase_: Union[str, Any] = 10, lowercase_: Tuple = 1_60, lowercase_: Dict = 8, lowercase_: Dict = 0.0, lowercase_: List[Any] = 40_00, lowercase_: Optional[Any] = False, lowercase_: Tuple = True) -> List[Any]:
        # NOTE(review): duplicate parameter names (SyntaxError as written).
        lowercase__: Any = parent
        lowercase__: Optional[Any] = batch_size
        lowercase__: Dict = min_seq_length
        lowercase__: Optional[Any] = max_seq_length
        # step between successive fabricated input lengths
        lowercase__: Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        lowercase__: Tuple = padding_value
        lowercase__: Optional[Any] = sampling_rate
        lowercase__: Optional[int] = return_attention_mask
        lowercase__: Union[str, Any] = do_normalize
        lowercase__: List[str] = feature_size
        lowercase__: List[str] = chunk_length
        lowercase__: List[Any] = hop_length

    def __UpperCamelCase(self: Dict) -> int:
        # Kwargs dict for constructing a WhisperFeatureExtractor.
        return {
            "feature_size": self.feature_size,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def __UpperCamelCase(self: Union[str, Any], lowercase_: str = False, lowercase_: Optional[int] = False) -> Union[str, Any]:
        # Fabricate batch inputs; `equal_length` makes all rows max length,
        # `numpify` converts each row to an ndarray.
        def _flatten(lowercase_: List[str]):
            return list(itertools.chain(*lowercase_))

        if equal_length:
            lowercase__: List[str] = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            lowercase__: List[str] = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            lowercase__: Tuple = [np.asarray(lowercase_) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class snake_case_(__A, unittest.TestCase):
    """WhisperFeatureExtractor behavioral tests."""

    __A: Any = WhisperFeatureExtractor if is_speech_available() else None

    def __UpperCamelCase(self: Dict) -> Dict:
        # setUp: attach the tester/config holder.
        lowercase__: Tuple = WhisperFeatureExtractionTester(self)

    def __UpperCamelCase(self: str) -> Optional[int]:
        # save_pretrained -> from_pretrained round-trip preserves config and
        # mel filter banks.
        lowercase__: Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase__: Union[str, Any] = feat_extract_first.save_pretrained(lowercase_)[0]
            check_json_file_has_correct_format(lowercase_)
            lowercase__: Optional[int] = self.feature_extraction_class.from_pretrained(lowercase_)
        lowercase__: Union[str, Any] = feat_extract_first.to_dict()
        lowercase__: List[Any] = feat_extract_second.to_dict()
        lowercase__: Optional[Any] = feat_extract_first.mel_filters
        lowercase__: str = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(lowercase_, lowercase_))
        self.assertEqual(lowercase_, lowercase_)

    def __UpperCamelCase(self: Tuple) -> Any:
        # to_json_file -> from_json_file round-trip preserves config and
        # mel filter banks.
        lowercase__: Tuple = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowercase__: Optional[Any] = os.path.join(lowercase_, "feat_extract.json")
            feat_extract_first.to_json_file(lowercase_)
            lowercase__: Optional[Any] = self.feature_extraction_class.from_json_file(lowercase_)
        lowercase__: List[Any] = feat_extract_first.to_dict()
        lowercase__: Tuple = feat_extract_second.to_dict()
        lowercase__: int = feat_extract_first.mel_filters
        lowercase__: str = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(lowercase_, lowercase_))
        self.assertEqual(lowercase_, lowercase_)

    def __UpperCamelCase(self: Any) -> Optional[Any]:
        # Tests that all call wrap to encode_plus and batch_encode_plus
        lowercase__: Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        lowercase__: Optional[Any] = [floats_list((1, x))[0] for x in range(8_00, 14_00, 2_00)]
        lowercase__: Tuple = [np.asarray(lowercase_) for speech_input in speech_inputs]
        # Test feature size
        lowercase__: Dict = feature_extractor(lowercase_, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)
        # Test not batched input
        lowercase__: str = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        lowercase__: Tuple = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(lowercase_, lowercase_, atol=1E-3))
        # Test batched
        lowercase__: Union[str, Any] = feature_extractor(lowercase_, return_tensors="np").input_features
        lowercase__: Any = feature_extractor(lowercase_, return_tensors="np").input_features
        for enc_seq_a, enc_seq_a in zip(lowercase_, lowercase_):
            self.assertTrue(np.allclose(lowercase_, lowercase_, atol=1E-3))
        # Test 2-D numpy arrays are batched.
        lowercase__: List[Any] = [floats_list((1, x))[0] for x in (8_00, 8_00, 8_00)]
        lowercase__: Optional[Any] = np.asarray(lowercase_)
        lowercase__: Tuple = feature_extractor(lowercase_, return_tensors="np").input_features
        lowercase__: Tuple = feature_extractor(lowercase_, return_tensors="np").input_features
        for enc_seq_a, enc_seq_a in zip(lowercase_, lowercase_):
            self.assertTrue(np.allclose(lowercase_, lowercase_, atol=1E-3))
        # Test truncation required
        lowercase__: Union[str, Any] = [floats_list((1, x))[0] for x in range(2_00, (feature_extractor.n_samples + 5_00), 2_00)]
        lowercase__: int = [np.asarray(lowercase_) for speech_input in speech_inputs]
        lowercase__: Optional[int] = [x[: feature_extractor.n_samples] for x in speech_inputs]
        lowercase__: Optional[int] = [np.asarray(lowercase_) for speech_input in speech_inputs_truncated]
        lowercase__: List[str] = feature_extractor(lowercase_, return_tensors="np").input_features
        lowercase__: Tuple = feature_extractor(lowercase_, return_tensors="np").input_features
        for enc_seq_a, enc_seq_a in zip(lowercase_, lowercase_):
            self.assertTrue(np.allclose(lowercase_, lowercase_, atol=1E-3))

    def __UpperCamelCase(self: int) -> Dict:
        # pad() keeps the documented dtype for both numpy and torch tensors.
        import torch

        lowercase__: Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lowercase__: Dict = np.random.rand(1_00, 32).astype(np.floataa)
        lowercase__: Dict = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            lowercase__: Tuple = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.floataa)
            lowercase__: Dict = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.floataa)

    def __UpperCamelCase(self: Optional[int], lowercase_: Optional[int]) -> List[Any]:
        # _load_datasamples: decode the first `num_samples` librispeech clips.
        lowercase__: int = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        lowercase__: Dict = ds.sort("id").select(range(lowercase_))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def __UpperCamelCase(self: Tuple) -> Tuple:
        # Integration check: extracted log-mel features match recorded values.
        # fmt: off
        lowercase__: Any = torch.tensor(
            [
                0.11_93, -0.09_46, -0.10_98, -0.01_96, 0.02_25, -0.06_90, -0.17_36, 0.09_51,
                0.09_71, -0.08_17, -0.07_02, 0.01_62, 0.02_60, 0.00_17, -0.01_92, -0.16_78,
                0.07_09, -0.18_67, -0.06_55, -0.02_74, -0.02_34, -0.18_84, -0.05_16, -0.05_54,
                -0.02_74, -0.14_25, -0.14_23, 0.08_37, 0.03_77, -0.08_54
            ]
        )
        # fmt: on
        lowercase__: Dict = self._load_datasamples(1)
        lowercase__: Union[str, Any] = WhisperFeatureExtractor()
        lowercase__: List[Any] = feature_extractor(lowercase_, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 30_00))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], lowercase_, atol=1E-4))

    def __UpperCamelCase(self: Any) -> str:
        # zero_mean_unit_var_norm yields ~0 mean and ~1 variance even for
        # rescaled input.
        lowercase__: Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        lowercase__: Tuple = self._load_datasamples(1)[0]
        lowercase__: Optional[Any] = ((audio - audio.min()) / (audio.max() - audio.min())) * 6_55_35  # Rescale to [0, 65535] to show issue
        lowercase__: str = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=lowercase_)[0]
        self.assertTrue(np.all(np.mean(lowercase_) < 1E-3))
        self.assertTrue(np.all(np.abs(np.var(lowercase_) - 1) < 1E-3))
87
import operator def lowercase_ ( _lowerCamelCase : list , _lowerCamelCase : bool = False , _lowerCamelCase : list | None = None): lowercase__ : int = operator.lt if reverse else operator.gt lowercase__ : str = solution or [] if not arr: return solution lowercase__ : List[str] = [arr.pop(0)] for i, item in enumerate(_lowerCamelCase): if _operator(_lowerCamelCase , sublist[-1]): sublist.append(_lowerCamelCase) arr.pop(_lowerCamelCase) # merging sublist into solution list if not solution: solution.extend(_lowerCamelCase) else: while sublist: lowercase__ : str = sublist.pop(0) for i, xx in enumerate(_lowerCamelCase): if not _operator(_lowerCamelCase , _lowerCamelCase): solution.insert(_lowerCamelCase , _lowerCamelCase) break else: solution.append(_lowerCamelCase) strand_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
87
1
# Lazy import scaffolding for the GPT-BigCode model (standard Hugging Face
# pattern): heavy submodules are only imported on first attribute access
# through ``_LazyModule``.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Submodule name -> public names it exports (configuration is always available).
UpperCamelCase = {
    '''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}

# Register the torch modeling classes only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): this rebinds ``UpperCamelCase`` (the import-structure dict)
    # to a plain list instead of adding a "modeling_gpt_bigcode" entry, and the
    # ``_LazyModule`` call at the bottom references an undefined
    # ``_import_structure`` — an automated identifier rename appears to have
    # broken this file; confirm against upstream transformers.
    UpperCamelCase = [
        '''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GPTBigCodeForSequenceClassification''',
        '''GPTBigCodeForTokenClassification''',
        '''GPTBigCodeForCausalLM''',
        '''GPTBigCodeModel''',
        '''GPTBigCodePreTrainedModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real, eager imports.
    from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_bigcode import (
            GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTBigCodeForCausalLM,
            GPTBigCodeForSequenceClassification,
            GPTBigCodeForTokenClassification,
            GPTBigCodeModel,
            GPTBigCodePreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module object with a lazy proxy.
    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
87
# Stopping criteria used by text generation to decide when to end sampling.
#
# NOTE(review): this chunk has been through an automated identifier-renaming
# pass: every class is named ``snake_case_`` (later definitions shadow earlier
# ones), all parameters are ``lowercase_``, and several bodies reference the
# *original* names (``max_length``, ``start_length``, ``logger``,
# ``stopping_max_length``, ...) that no longer exist.  The code as written
# cannot run; the comments below describe the evident intent (the Hugging Face
# ``transformers`` stopping-criteria module) — confirm against upstream.
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging

# Module logger.
UpperCamelCase = logging.get_logger(__name__)

# Docstring shared by every criterion's ``__call__`` via ``add_start_docstrings``.
# NOTE(review): rebinding ``UpperCamelCase`` clobbers the logger bound above;
# later code calls an undefined ``logger`` — verify.
UpperCamelCase = R'''
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

'''


class snake_case_ ( __A ):
    # Abstract base: concrete criteria must override ``__call__``.

    @add_start_docstrings(lowercase_ )
    def __call__( self : Optional[Any] , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : List[str] ) -> bool:
        # The base class never decides to stop; subclasses must implement this.
        raise NotImplementedError("StoppingCriteria needs to be subclassed" )


class snake_case_ ( __A ):
    # Max-length criterion: stop once the sequence reaches ``max_length`` tokens.
    # NOTE(review): shadows the previous ``snake_case_`` definition.

    def __init__( self : Dict , lowercase_ : int , lowercase_ : Optional[int] = None ) -> List[str]:
        # NOTE(review): ``max_length`` / ``max_position_embeddings`` are
        # undefined here — presumably the original parameter names.
        lowercase__ : str = max_length
        lowercase__ : Optional[int] = max_position_embeddings

    @add_start_docstrings(lowercase_ )
    def __call__( self : Tuple , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Union[str, Any] ) -> bool:
        # Sequence length is the last dimension of the (batch, seq) id tensor.
        lowercase__ : str = input_ids.shape[-1]
        lowercase__ : Any = cur_len >= self.max_length
        # Friendly warning when generation will exceed the model's position limit.
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                F'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
                "exceptions, performance degradation, or nothing at all." )
        return is_done


class snake_case_ ( __A ):
    # Deprecated criterion: stop after ``max_new_tokens`` generated tokens
    # beyond ``start_length``.

    def __init__( self : Tuple , lowercase_ : int , lowercase_ : int ) -> List[str]:
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            F'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
            "with `max_length = start_length + max_new_tokens` instead." , lowercase_ , )
        lowercase__ : Optional[int] = start_length
        lowercase__ : str = max_new_tokens
        lowercase__ : Tuple = start_length + max_new_tokens

    @add_start_docstrings(lowercase_ )
    def __call__( self : List[Any] , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Dict ) -> bool:
        return input_ids.shape[-1] >= self.max_length


class snake_case_ ( __A ):
    # Wall-clock criterion: stop once generation has run longer than
    # ``max_time`` seconds, measured from ``initial_timestamp`` (defaults to
    # construction time).

    def __init__( self : Tuple , lowercase_ : float , lowercase_ : Optional[float] = None ) -> Dict:
        lowercase__ : List[str] = max_time
        lowercase__ : Tuple = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(lowercase_ )
    def __call__( self : int , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Union[str, Any] ) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class snake_case_ ( __A ):
    # Container of criteria (originally a list subclass): stops when ANY
    # member criterion fires.

    @add_start_docstrings(lowercase_ )
    def __call__( self : str , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : List[str] ) -> bool:
        return any(criteria(lowercase_ , lowercase_ ) for criteria in self )

    @property
    def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
        # Return the ``max_length`` of the first length-based criterion, if any.
        # NOTE(review): the ``isinstance`` checks test undefined names.
        for stopping_criterium in self:
            if isinstance(lowercase_ , lowercase_ ):
                return stopping_criterium.max_length
            elif isinstance(lowercase_ , lowercase_ ):
                return stopping_criterium.max_length
        return None


def lowercase_ ( _lowerCamelCase : StoppingCriteriaList , _lowerCamelCase : int):
    # Reconcile a criteria list with an explicit ``max_length``: warn on a
    # mismatch, or append a max-length criterion when none is present.
    # NOTE(review): ``stopping_max_length`` / ``new_stopping_criteria`` /
    # ``MaxLengthCriteria`` are undefined after the rename.
    lowercase__ : Optional[int] = stopping_criteria.max_length
    lowercase__ : str = deepcopy(_lowerCamelCase)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , _lowerCamelCase)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCamelCase))
    return new_stopping_criteria
87
1
from __future__ import annotations


def lowercase_(_lowerCamelCase: list[int]):
    """Return ``True`` when every element of the list occurs exactly once.

    Walks the values once, tracking what has been seen; bails out early on the
    first repeat instead of materialising a full ``set`` of the whole input.
    """
    seen: set[int] = set()
    for value in _lowerCamelCase:
        if value in seen:
            return False
        seen.add(value)
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
87
# Builders for per-residue lookup tables mapping between the compact 14-atom
# and full 37-atom protein representations (AlphaFold/OpenFold convention).
#
# NOTE(review): identifiers look machine-renamed — the accumulator lists and
# tensors assigned to ``lowercase__`` are later read via their original names
# (``restype_atomaa_to_atomaa_list``, ``protein_aatype``, ...), and ``protein``
# is returned without ever being bound/updated.  The code as written cannot
# run; comments describe the evident intent — confirm against OpenFold.
from typing import Dict

import numpy as np
import torch

from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map


def lowercase_ ( _lowerCamelCase : Dict[str, torch.Tensor]):
    # Per-restype tables: atom14 index -> atom37 index, the reverse mapping,
    # and an existence mask for the 14 slots.
    lowercase__ : Any = []
    lowercase__ : Optional[int] = []
    lowercase__ : Tuple = []
    for rt in rc.restypes:
        lowercase__ : Dict = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        # atom14 -> atom37 (0 for empty slots).
        restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        lowercase__ : str = {name: i for i, name in enumerate(_lowerCamelCase)}
        # atom37 -> atom14 (0 when the atom is absent from the 14-slot layout).
        restype_atomaa_to_atomaa_list.append(
            [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types])
        restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atomaa_to_atomaa_list.append([0] * 14)
    restype_atomaa_to_atomaa_list.append([0] * 37)
    restype_atomaa_mask_list.append([0.0] * 14)
    # Materialise the tables on the same device as the input residue types.
    lowercase__ : Union[str, Any] = torch.tensor(
        _lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , )
    lowercase__ : str = torch.tensor(
        _lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , )
    lowercase__ : List[str] = torch.tensor(
        _lowerCamelCase , dtype=torch.floataa , device=protein["aatype"].device , )
    lowercase__ : str = protein["aatype"].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    lowercase__ : Dict = restype_atomaa_to_atomaa[protein_aatype]
    lowercase__ : str = restype_atomaa_mask[protein_aatype]
    lowercase__ : List[Any] = residx_atomaa_mask
    lowercase__ : Optional[Any] = residx_atomaa_to_atomaa.long()
    # create the gather indices for mapping back
    lowercase__ : str = restype_atomaa_to_atomaa[protein_aatype]
    lowercase__ : str = residx_atomaa_to_atomaa.long()
    # create the corresponding mask
    lowercase__ : Optional[Any] = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        lowercase__ : Tuple = rc.restype_atoa[restype_letter]
        lowercase__ : List[Any] = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            lowercase__ : Optional[int] = rc.atom_order[atom_name]
            # Mark this (restype, atom37) slot as existing.
            lowercase__ : Tuple = 1
    lowercase__ : Dict = restype_atomaa_mask[protein_aatype]
    lowercase__ : Any = residx_atomaa_mask
    return protein


def lowercase_ ( _lowerCamelCase : Dict[str, torch.Tensor]):
    # NumPy-facing wrapper: lift ndarrays to tensors, run the torch version,
    # and convert the results back to NumPy arrays.
    # NOTE(review): ``batch``, ``make_atomaa_masks`` and ``out`` are undefined
    # after the rename — verify against the original module.
    lowercase__ : Tuple = tree_map(lambda _lowerCamelCase: torch.tensor(_lowerCamelCase , device=batch["aatype"].device) , _lowerCamelCase , np.ndarray)
    lowercase__ : List[str] = tensor_tree_map(lambda _lowerCamelCase: np.array(_lowerCamelCase) , make_atomaa_masks(_lowerCamelCase))
    return out
87
1
# ``datasets`` metric wrapper around Google Research's BLEURT scorer.
#
# NOTE(review): the module-level constants (citation, description, kwargs
# description, checkpoint map) have all been renamed to the same
# ``UpperCamelCase`` binding — each assignment clobbers the previous one — yet
# the decorator and methods still reference the original names
# (``_DESCRIPTION``, ``CHECKPOINT_URLS``, ``logger``, ...).  The file cannot
# run as written; confirm against the upstream metric script.
import os

from bleurt import score  # From: git+https://github.com/google-research/bleurt.git

import datasets


UpperCamelCase = datasets.logging.get_logger(__name__)

UpperCamelCase = '''\
@inproceedings{bleurt,
  title={BLEURT: Learning Robust Metrics for Text Generation},
  author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},
  booktitle={ACL},
  year={2020},
  url={https://arxiv.org/abs/2004.04696}
}
'''

UpperCamelCase = '''\
BLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer
learning starting from a pretrained BERT model (Devlin et al. 2018) and then employing another pre-training
phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT
out-of-the-box or fine-tune it for your specific application (the latter is expected to perform better).

See the project\'s README at https://github.com/google-research/bleurt#readme for more information.
'''

UpperCamelCase = '''
BLEURT score.

Args:
    `predictions` (list of str): prediction/candidate sentences
    `references` (list of str): reference sentences
    `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.

Returns:
    \'scores\': List of scores.
Examples:

    >>> predictions = ["hello there", "general kenobi"]
    >>> references = ["hello there", "general kenobi"]
    >>> bleurt = datasets.load_metric("bleurt")
    >>> results = bleurt.compute(predictions=predictions, references=references)
    >>> print([round(v, 2) for v in results["scores"]])
    [1.03, 1.04]
'''

# Downloadable checkpoints, keyed by config name.
UpperCamelCase = {
    '''bleurt-tiny-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip''',
    '''bleurt-tiny-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip''',
    '''bleurt-base-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip''',
    '''bleurt-base-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip''',
    '''bleurt-large-128''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip''',
    '''bleurt-large-512''': '''https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip''',
    '''BLEURT-20-D3''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip''',
    '''BLEURT-20-D6''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip''',
    '''BLEURT-20-D12''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip''',
    '''BLEURT-20''': '''https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip''',
}


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
    def __UpperCamelCase ( self : List[Any] ) -> List[Any]:
        # Metric metadata: string predictions/references in, list of scores out.
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/google-research/bleurt" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Value("string" , id="sequence" ),
                } ) , codebase_urls=["https://github.com/google-research/bleurt"] , reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"] , )

    def __UpperCamelCase ( self : Dict , lowercase_ : Any ) -> str:
        # check that config name specifies a valid BLEURT model
        if self.config_name == "default":
            logger.warning(
                "Using default BLEURT-Base checkpoint for sequence maximum length 128. "
                "You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512')." )
            lowercase__ : Optional[int] = "bleurt-base-128"

        # Accept either lower- or upper-case checkpoint names.
        if self.config_name.lower() in CHECKPOINT_URLS:
            lowercase__ : str = self.config_name.lower()
        elif self.config_name.upper() in CHECKPOINT_URLS:
            lowercase__ : Tuple = self.config_name.upper()
        else:
            raise KeyError(
                F'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )

        # download the model checkpoint specified by self.config_name and set up the scorer
        lowercase__ : Any = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
        lowercase__ : int = score.BleurtScorer(os.path.join(lowercase_ , lowercase_ ) )

    def __UpperCamelCase ( self : List[Any] , lowercase_ : Dict , lowercase_ : List[str] ) -> Union[str, Any]:
        # Score each (reference, candidate) pair with the loaded BLEURT model.
        lowercase__ : str = self.scorer.score(references=lowercase_ , candidates=lowercase_ )
        return {"scores": scores}
87
# Flax BigBird model tests (Hugging Face test-suite style): a tester that
# fabricates tiny configs/inputs, plus the mixin-driven test case.
# NOTE(review): class names were machine-renamed to ``snake_case_`` (the test
# class shadows the tester) while other code references
# ``FlaxBigBirdModelTester`` — confirm against upstream transformers.
import unittest

from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax

    from transformers.models.big_bird.modeling_flax_big_bird import (
        FlaxBigBirdForCausalLM,
        FlaxBigBirdForMaskedLM,
        FlaxBigBirdForMultipleChoice,
        FlaxBigBirdForPreTraining,
        FlaxBigBirdForQuestionAnswering,
        FlaxBigBirdForSequenceClassification,
        FlaxBigBirdForTokenClassification,
        FlaxBigBirdModel,
    )


class snake_case_ ( unittest.TestCase ):
    # Tester: holds the tiny hyper-parameters used to build configs/inputs.
    def __init__( self : Tuple , lowercase_ : List[Any] , lowercase_ : Union[str, Any]=2 , lowercase_ : Union[str, Any]=56 , lowercase_ : Tuple=True , lowercase_ : Optional[Any]=True , lowercase_ : Optional[Any]=True , lowercase_ : int=True , lowercase_ : Any=99 , lowercase_ : int=32 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=2 , lowercase_ : Dict=7 , lowercase_ : Dict="gelu_new" , lowercase_ : Tuple=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : Tuple=5_12 , lowercase_ : Optional[Any]=16 , lowercase_ : List[Any]=2 , lowercase_ : Dict=0.02 , lowercase_ : int=4 , lowercase_ : Tuple="block_sparse" , lowercase_ : Dict=True , lowercase_ : Optional[int]=False , lowercase_ : Dict=2 , lowercase_ : int=3 , ) -> Union[str, Any]:
        # NOTE(review): all parameters were renamed to the same ``lowercase_``;
        # the right-hand-side names below (``parent``, ``batch_size``, ...)
        # are the original parameter names and are undefined here.
        lowercase__ : Dict = parent
        lowercase__ : Dict = batch_size
        lowercase__ : Tuple = seq_length
        lowercase__ : Dict = is_training
        lowercase__ : Dict = use_attention_mask
        lowercase__ : Tuple = use_token_type_ids
        lowercase__ : Optional[int] = use_labels
        lowercase__ : List[Any] = vocab_size
        lowercase__ : Any = hidden_size
        lowercase__ : List[Any] = num_hidden_layers
        lowercase__ : Union[str, Any] = num_attention_heads
        lowercase__ : str = intermediate_size
        lowercase__ : int = hidden_act
        lowercase__ : str = hidden_dropout_prob
        lowercase__ : List[str] = attention_probs_dropout_prob
        lowercase__ : Optional[Any] = max_position_embeddings
        lowercase__ : Union[str, Any] = type_vocab_size
        lowercase__ : Dict = type_sequence_label_size
        lowercase__ : Any = initializer_range
        lowercase__ : List[str] = num_choices
        lowercase__ : str = rescale_embeddings
        lowercase__ : Optional[Any] = attention_type
        lowercase__ : Optional[int] = use_bias
        lowercase__ : Optional[int] = block_size
        lowercase__ : str = num_random_blocks

    def __UpperCamelCase ( self : str ) -> Optional[Any]:
        # Build (config, input_ids, token_type_ids, attention_mask) for a run.
        lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        lowercase__ : str = None
        if self.use_attention_mask:
            lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] )

        lowercase__ : Optional[int] = None
        if self.use_token_type_ids:
            lowercase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        lowercase__ : int = BigBirdConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , )
        return config, input_ids, token_type_ids, attention_mask

    def __UpperCamelCase ( self : Union[str, Any] ) -> int:
        # Repackage the prepared inputs as the kwargs dict the common tests use.
        lowercase__ : int = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs
        lowercase__ : Union[str, Any] = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_flax
class snake_case_ ( __A ,unittest.TestCase ):
    # Test case driven by the FlaxModelTesterMixin; lists every head to test.
    __A : Optional[int] = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    __A : List[str] = False
    __A : Any = False

    def __UpperCamelCase ( self : List[str] ) -> List[Any]:
        lowercase__ : Union[str, Any] = FlaxBigBirdModelTester(self )

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __UpperCamelCase ( self : Optional[int] ) -> Dict:
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __UpperCamelCase ( self : List[str] ) -> Any:
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __UpperCamelCase ( self : Tuple ) -> str:
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __UpperCamelCase ( self : Dict ) -> Union[str, Any]:
        super().test_hidden_states_output()

    @slow
    def __UpperCamelCase ( self : Optional[int] ) -> Tuple:
        # Smoke-check that the public pretrained checkpoint loads for each head.
        for model_class_name in self.all_model_classes:
            lowercase__ : Optional[Any] = model_class_name.from_pretrained("google/bigbird-roberta-base" )
            self.assertIsNotNone(lowercase_ )

    def __UpperCamelCase ( self : int ) -> Optional[int]:
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __UpperCamelCase ( self : str ) -> Any:
        # Verify jitted and non-jitted forward passes agree in output shapes.
        lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__ ):
                lowercase__ : Union[str, Any] = self._prepare_for_class(lowercase_ , lowercase_ )
                lowercase__ : Optional[Any] = model_class(lowercase_ )

                @jax.jit
                def model_jitted(lowercase_ : Tuple , lowercase_ : int=None , **lowercase_ : Dict ):
                    return model(input_ids=lowercase_ , attention_mask=lowercase_ , **lowercase_ )

                with self.subTest("JIT Enabled" ):
                    lowercase__ : int = model_jitted(**lowercase_ ).to_tuple()

                with self.subTest("JIT Disabled" ):
                    with jax.disable_jit():
                        lowercase__ : Any = model_jitted(**lowercase_ ).to_tuple()

                self.assertEqual(len(lowercase_ ) , len(lowercase_ ) )
                for jitted_output, output in zip(lowercase_ , lowercase_ ):
                    self.assertEqual(jitted_output.shape , output.shape )

    def __UpperCamelCase ( self : List[Any] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : List[Any]=1E-5 , lowercase_ : Any="outputs" , lowercase_ : List[str]=None ) -> List[Any]:
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions" ):
            return
        else:
            super().check_pt_flax_outputs(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
87
1
UpperCamelCase = '''Tobias Carryer''' from time import time class snake_case_ : def __init__( self : str , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Dict , lowercase_ : Optional[int]=int(time() ) ) -> Dict: # noqa: B008 lowercase__ : Dict = multiplier lowercase__ : int = increment lowercase__ : int = modulo lowercase__ : Optional[int] = seed def __UpperCamelCase ( self : List[Any] ) -> Dict: lowercase__ : Union[str, Any] = (self.multiplier * self.seed + self.increment) % self.modulo return self.seed if __name__ == "__main__": # Show the LCG in action. UpperCamelCase = LinearCongruentialGenerator(166_4525, 10_1390_4223, 2 << 31) while True: print(lcg.next_number())
87
# Lazy import scaffolding for the GroupViT model (standard Hugging Face
# pattern): submodules are imported on first attribute access via
# ``_LazyModule``.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Submodule name -> public names it exports (configuration is always available).
UpperCamelCase = {
    '''configuration_groupvit''': [
        '''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''GroupViTConfig''',
        '''GroupViTOnnxConfig''',
        '''GroupViTTextConfig''',
        '''GroupViTVisionConfig''',
    ],
}

# Register the torch modeling classes only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # NOTE(review): this rebinds ``UpperCamelCase`` (the import-structure dict)
    # to a plain list, and the ``_LazyModule`` call at the bottom references an
    # undefined ``_import_structure`` — an automated rename appears to have
    # broken this file; confirm against upstream transformers.
    UpperCamelCase = [
        '''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GroupViTModel''',
        '''GroupViTPreTrainedModel''',
        '''GroupViTTextModel''',
        '''GroupViTVisionModel''',
    ]

# Register the TensorFlow modeling classes only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = [
        '''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFGroupViTModel''',
        '''TFGroupViTPreTrainedModel''',
        '''TFGroupViTTextModel''',
        '''TFGroupViTVisionModel''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real, eager imports.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )
else:
    import sys

    # At runtime, replace this module object with a lazy proxy.
    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
87
1
def add(first: int, second: int) -> int:
    """Add two non-negative integers using only bitwise operations.

    Classic carry-propagation: XOR gives the sum ignoring carries, AND finds
    the carry bits, and shifting the carries left feeds them back in until
    none remain.  (With Python's unbounded ints this terminates only for
    non-negative operands.)

    Fixes over the previous version: both parameters shared one name (a
    SyntaxError), the carry variable was assigned but read under an undefined
    name ``c``, and ``second`` was never updated — an infinite loop.

    >>> add(3, 5)
    8
    """
    while second != 0:
        carry = first & second   # bits that overflow out of their position
        first ^= second          # partial sum without carries
        second = carry << 1      # re-inject carries one position to the left
    return first


# Backward-compatible alias for the previous (machine-generated) public name.
lowercase_ = add

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    first_number = int(input('''Enter the first number: ''').strip())
    second_number = int(input('''Enter the second number: ''').strip())
    print(f"{add(first_number, second_number) = }")
87
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_json_dataset(dataset, expected_features):
    """Assert *dataset* is the canonical 4-row / 3-column test table with the expected dtypes."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    """keep_in_memory=True must allocate Arrow memory; False must not."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    """An explicit ``features`` schema must override the inferred dtypes."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    """Column order from the file (col_3, col_1, col_2) must be preserved."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    """A user schema whose column order differs from the file must win."""
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    """The requested split name must be propagated; default is "train"."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    """Both a single path and a list of paths must be accepted."""
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Assert every listed split of *dataset_dict* is the canonical 4x3 test table."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    """Dict-of-paths input must yield one dataset per split with matching split names."""
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    """Decode a whole JSON document from *buffer*."""
    return json.load(buffer)


def load_json_lines(buffer):
    """Decode one JSON object per line from *buffer* (JSON Lines)."""
    # NOTE: the previous version decoded the buffer itself instead of each line.
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        """Round-trip through an in-memory buffer in lines / non-lines mode."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        """Each pandas ``orient`` must produce the expected top-level JSON shape."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        """Same round-trip, written with two worker processes."""
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        """num_proc=0 is invalid and must raise."""
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        """Compressed output must decompress to the same bytes as the reference file."""
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()
        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
87
1
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

# fairseq parameter name -> HF parameter name ("*" is the encoder layer index)
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.linear_k": "encoder.layers.*.self_attn.linear_k",
    "self_attn.linear_v": "encoder.layers.*.self_attn.linear_v",
    "self_attn.linear_q": "encoder.layers.*.self_attn.linear_q",
    "self_attn.pos_bias_u": "encoder.layers.*.self_attn.pos_bias_u",
    "self_attn.pos_bias_v": "encoder.layers.*.self_attn.pos_bias_v",
    "self_attn.linear_out": "encoder.layers.*.self_attn.linear_out",
    "self_attn.linear_pos": "encoder.layers.*.self_attn.linear_pos",
    "self_attn.rotary_emb": "encoder.embed_positions",
    "self_attn_layer_norm": "encoder.layers.*.self_attn_layer_norm",
    "conv_module.pointwise_conv1": "encoder.layers.*.conv_module.pointwise_conv1",
    "conv_module.pointwise_conv2": "encoder.layers.*.conv_module.pointwise_conv2",
    "conv_module.depthwise_conv": "encoder.layers.*.conv_module.depthwise_conv",
    "conv_module.batch_norm": "encoder.layers.*.conv_module.batch_norm",
    "conv_module.layer_norm": "encoder.layers.*.conv_module.layer_norm",
    "ffn1.w_1": "encoder.layers.*.ffn1.intermediate_dense",
    "ffn1.w_2": "encoder.layers.*.ffn1.output_dense",
    "ffn1.layer_norm": "encoder.layers.*.ffn1_layer_norm",
    "ffn2.w_1": "encoder.layers.*.ffn2.intermediate_dense",
    "ffn2.w_2": "encoder.layers.*.ffn2.output_dense",
    "ffn2.layer_norm": "encoder.layers.*.ffn2_layer_norm",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
# HF keys that live at the top level of the model (no "wav2vec2_conformer." prefix)
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Copy *value* into the HF module located by the dotted *key*, after a shape check.

    Raises ValueError if the destination tensor's shape does not match *value*.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')


def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy all fairseq weights into *hf_model*, logging any fairseq weight left unused."""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy one fairseq feature-extractor conv weight into the HF feature extractor.

    type_id 0 is the conv itself; type_id 2 is the (layer/group) norm. Anything else is
    recorded in *unused_weights*.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Convert a fairseq wav2vec2-conformer checkpoint to the HF Transformers format.

    Writes the converted model (and, for fine-tuned CTC models, a processor) to
    *pytorch_dump_folder_path*.
    """
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
87
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutLMv3Processor(ProcessorMixin):
    r"""
    Combines a LayoutLMv3 image processor and a LayoutLMv3 tokenizer into a single processor.

    The image processor optionally runs OCR on the document images (``apply_ocr``); when it does,
    the recognized words and boxes are fed to the tokenizer automatically.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        # `feature_extractor` is the legacy name for `image_processor`; accept it with a warning.
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor (optionally with OCR) then the tokenizer, and merge the outputs."""
        # verify input: user-supplied boxes/labels conflict with OCR-derived ones
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """Duplicate images so each overflowed `input_ids` sample maps to its source image."""
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward all arguments to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
87
1
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    """Deprecated alias of :class:`ChineseCLIPImageProcessor`.

    Emits a ``FutureWarning`` on instantiation; behavior is otherwise identical to the parent.
    """

    def __init__(self, *args, **kwargs) -> None:
        # The mangled version passed an undefined name as the warning category;
        # the category is FutureWarning.
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
87
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class ChineseCLIPImageProcessor(BaseImageProcessor):
    r"""
    Constructs a Chinese-CLIP image processor: resize (shortest edge), center crop,
    rescale, normalize, and optional RGB conversion.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        # Defaults to the OpenAI CLIP normalization statistics.
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals ``size["shortest_edge"]``, preserving aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``; pads if the image is smaller."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by *scale* (e.g. 1/255 to map uint8 to [0, 1])."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Per-channel normalize: ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Preprocess one image or a batch; per-call arguments override the instance defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
87
1
import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {'''vocab_file''': '''vocab.txt'''} UpperCamelCase = { '''vocab_file''': { '''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''', }, } UpperCamelCase = { '''openbmb/cpm-ant-10b''': 1024, } def lowercase_ ( _lowerCamelCase : int): lowercase__ : Tuple = collections.OrderedDict() with open(_lowerCamelCase , "r" , encoding="utf-8") as reader: lowercase__ : Tuple = reader.readlines() for index, token in enumerate(_lowerCamelCase): lowercase__ : Optional[int] = token.rstrip("\n") lowercase__ : Optional[int] = index return vocab class snake_case_ ( __A ): def __init__( self : str , lowercase_ : int , lowercase_ : str="<unk>" , lowercase_ : List[Any]=2_00 ) -> Union[str, Any]: lowercase__ : Any = vocab lowercase__ : int = unk_token lowercase__ : Union[str, Any] = max_input_chars_per_word def __UpperCamelCase ( self : Dict , lowercase_ : Any ) -> List[Any]: lowercase__ : List[str] = list(lowercase_ ) if len(lowercase_ ) > self.max_input_chars_per_word: return [self.unk_token] lowercase__ : int = 0 lowercase__ : Union[str, Any] = [] while start < len(lowercase_ ): lowercase__ : int = len(lowercase_ ) lowercase__ : List[Any] = None while start < end: lowercase__ : int = "".join(chars[start:end] ) if substr in self.vocab: lowercase__ : Tuple = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(lowercase_ ) lowercase__ : Dict = end return sub_tokens class snake_case_ ( __A ): __A : List[str] = VOCAB_FILES_NAMES __A : str = PRETRAINED_VOCAB_FILES_MAP __A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __A : int = ["input_ids", "attention_mask"] 
__A : Optional[Any] = False def __init__( self : Optional[Any] , lowercase_ : str , lowercase_ : str="<d>" , lowercase_ : Dict="</d>" , lowercase_ : str="<s>" , lowercase_ : List[Any]="</s>" , lowercase_ : Tuple="<pad>" , lowercase_ : Optional[Any]="<unk>" , lowercase_ : Optional[Any]="</n>" , lowercase_ : int="</_>" , lowercase_ : List[Any]="left" , **lowercase_ : Union[str, Any] , ) -> Dict: requires_backends(self , ["jieba"] ) super().__init__( bod_token=lowercase_ , eod_token=lowercase_ , bos_token=lowercase_ , eos_token=lowercase_ , pad_token=lowercase_ , unk_token=lowercase_ , line_token=lowercase_ , space_token=lowercase_ , padding_side=lowercase_ , **lowercase_ , ) lowercase__ : Dict = bod_token lowercase__ : List[Any] = eod_token lowercase__ : List[str] = load_vocab(lowercase_ ) lowercase__ : Tuple = self.encoder[space_token] lowercase__ : Union[str, Any] = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] lowercase__ : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowercase_ : x[1] ) ) lowercase__ : Optional[Any] = {v: k for k, v in self.encoder.items()} lowercase__ : Dict = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token ) @property def __UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]: return self.encoder[self.bod_token] @property def __UpperCamelCase ( self : List[str] ) -> List[Any]: return self.encoder[self.eod_token] @property def __UpperCamelCase ( self : Optional[Any] ) -> int: return self.encoder["\n"] @property def __UpperCamelCase ( self : Any ) -> int: return len(self.encoder ) def __UpperCamelCase ( self : List[str] ) -> List[str]: return dict(self.encoder , **self.added_tokens_encoder ) def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : int ) -> Tuple: lowercase__ : Any = [] for x in jieba.cut(lowercase_ , cut_all=lowercase_ ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowercase_ ) ) return output_tokens def 
__UpperCamelCase ( self : Any , lowercase_ : Union[str, Any] , **lowercase_ : str ) -> Dict: lowercase__ : str = [i for i in token_ids if i >= 0] lowercase__ : List[str] = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(lowercase_ , **lowercase_ ) def __UpperCamelCase ( self : List[str] , lowercase_ : Dict ) -> int: return token in self.encoder def __UpperCamelCase ( self : str , lowercase_ : List[str] ) -> str: return "".join(lowercase_ ) def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Union[str, Any] ) -> List[str]: return self.encoder.get(lowercase_ , self.encoder.get(self.unk_token ) ) def __UpperCamelCase ( self : Any , lowercase_ : List[str] ) -> Optional[int]: return self.decoder.get(lowercase_ , self.unk_token ) def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : str , lowercase_ : Optional[str] = None ) -> Tuple[str]: if os.path.isdir(lowercase_ ): lowercase__ : Dict = os.path.join( lowercase_ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: lowercase__ : Optional[Any] = (filename_prefix + "-" if filename_prefix else "") + save_directory lowercase__ : int = 0 if " " in self.encoder: lowercase__ : Tuple = self.encoder[" "] del self.encoder[" "] if "\n" in self.encoder: lowercase__ : List[str] = self.encoder["\n"] del self.encoder["\n"] lowercase__ : Optional[Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowercase_ : x[1] ) ) with open(lowercase_ , "w" , encoding="utf-8" ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.''' " Please check that the vocabulary is not corrupted!" 
) lowercase__ : Union[str, Any] = token_index writer.write(token + "\n" ) index += 1 return (vocab_file,) def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : List[int] , lowercase_ : List[int] = None ) -> List[int]: if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def __UpperCamelCase ( self : List[Any] , lowercase_ : List[int] , lowercase_ : Optional[List[int]] = None , lowercase_ : bool = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=lowercase_ , token_ids_a=lowercase_ , already_has_special_tokens=lowercase_ ) if token_ids_a is not None: return [1] + ([0] * len(lowercase_ )) + [1] + ([0] * len(lowercase_ )) return [1] + ([0] * len(lowercase_ ))
87
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

# Lazy import structure handed to _LazyModule below.  The tokenizer entry is
# only registered when the optional sentencepiece backend is installed.
# NOTE(review): the mangled original rebound a single throwaway name instead of
# populating this dict, so _LazyModule received an undefined symbol.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    # Static type-checkers see the real import; module/class names must match
    # the string registered in _import_structure above (gpt_sw3 / GPTSw3Tokenizer).
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
1
from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def lowercase_ ( _lowerCamelCase : Dict[str, torch.Tensor]): lowercase__ : Any = [] lowercase__ : Optional[int] = [] lowercase__ : Tuple = [] for rt in rc.restypes: lowercase__ : Dict = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names]) lowercase__ : str = {name: i for i, name in enumerate(_lowerCamelCase)} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types]) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names]) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 14) restype_atomaa_to_atomaa_list.append([0] * 37) restype_atomaa_mask_list.append([0.0] * 14) lowercase__ : Union[str, Any] = torch.tensor( _lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , ) lowercase__ : str = torch.tensor( _lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , ) lowercase__ : List[str] = torch.tensor( _lowerCamelCase , dtype=torch.floataa , device=protein["aatype"].device , ) lowercase__ : str = protein["aatype"].to(torch.long) # create the mapping for (residx, atom14) --> atom37, i.e. 
an array # with shape (num_res, 14) containing the atom37 indices for this protein lowercase__ : Dict = restype_atomaa_to_atomaa[protein_aatype] lowercase__ : str = restype_atomaa_mask[protein_aatype] lowercase__ : List[Any] = residx_atomaa_mask lowercase__ : Optional[Any] = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back lowercase__ : str = restype_atomaa_to_atomaa[protein_aatype] lowercase__ : str = residx_atomaa_to_atomaa.long() # create the corresponding mask lowercase__ : Optional[Any] = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["aatype"].device) for restype, restype_letter in enumerate(rc.restypes): lowercase__ : Tuple = rc.restype_atoa[restype_letter] lowercase__ : List[Any] = rc.residue_atoms[restype_name] for atom_name in atom_names: lowercase__ : Optional[int] = rc.atom_order[atom_name] lowercase__ : Tuple = 1 lowercase__ : Dict = restype_atomaa_mask[protein_aatype] lowercase__ : Any = residx_atomaa_mask return protein def lowercase_ ( _lowerCamelCase : Dict[str, torch.Tensor]): lowercase__ : Tuple = tree_map(lambda _lowerCamelCase: torch.tensor(_lowerCamelCase , device=batch["aatype"].device) , _lowerCamelCase , np.ndarray) lowercase__ : List[str] = tensor_tree_map(lambda _lowerCamelCase: np.array(_lowerCamelCase) , make_atomaa_masks(_lowerCamelCase)) return out
87
# Project Euler 145: a number n is "reversible" when n + reverse(n) has only
# odd digits.  Digits of opposite parity must be paired so each column sum is odd.
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count reversible numbers of ``length`` digits.

    Digit positions are filled from the outside in, two at a time
    (``digits[(length - remaining_length) // 2]`` and its mirror), carrying the
    running column ``remainder``.  The mangled original assigned the chosen
    digits to dead locals instead of writing into ``digits``, so the final
    parity check always saw zeros and the count collapsed to 0 — restored here.

    :param remaining_length: number of digit positions still to fill
    :param remainder: accumulated carry from the columns fixed so far
    :param digits: work buffer of length ``length`` (mutated in place)
    :param length: total number of digits of the candidate
    :return: count of valid completions
    """
    if remaining_length == 0:
        # Leading/trailing zero would make n or reverse(n) invalid.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        # Verify every remaining column sum (plus carry) is odd.
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Odd length: the middle digit is added to itself (always even),
        # so the carry coming in must already be odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        # The paired digit must have opposite parity so the column sum is odd.
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit1 + digit2) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Return how many reversible numbers exist below ``10 ** max_power``."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
87
1
import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## UpperCamelCase = 16 UpperCamelCase = 32 def lowercase_ ( _lowerCamelCase : Accelerator , _lowerCamelCase : int = 16): lowercase__ : List[Any] = AutoTokenizer.from_pretrained("bert-base-cased") lowercase__ : Optional[int] = load_dataset("glue" , "mrpc") def tokenize_function(_lowerCamelCase : Dict): # max_length=None => use the model max length (it's actually the default) lowercase__ : Tuple = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=_lowerCamelCase , max_length=_lowerCamelCase) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowercase__ : 
Optional[int] = datasets.map( _lowerCamelCase , batched=_lowerCamelCase , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase__ : int = tokenized_datasets.rename_column("label" , "labels") def collate_fn(_lowerCamelCase : int): # On TPU it's best to pad everything to the same length or training will be very slow. lowercase__ : str = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowercase__ : List[Any] = 16 elif accelerator.mixed_precision != "no": lowercase__ : Any = 8 else: lowercase__ : Tuple = None return tokenizer.pad( _lowerCamelCase , padding="longest" , max_length=_lowerCamelCase , pad_to_multiple_of=_lowerCamelCase , return_tensors="pt" , ) # Instantiate dataloaders. lowercase__ : Dict = DataLoader( tokenized_datasets["train"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase) lowercase__ : Optional[int] = DataLoader( tokenized_datasets["validation"] , shuffle=_lowerCamelCase , collate_fn=_lowerCamelCase , batch_size=_lowerCamelCase) return train_dataloader, eval_dataloader # For testing only if os.environ.get('''TESTING_MOCKED_DATALOADERS''', None) == "1": from accelerate.test_utils.training import mocked_dataloaders UpperCamelCase = mocked_dataloaders # noqa: F811 def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any]): # For testing only if os.environ.get("TESTING_MOCKED_DATALOADERS" , _lowerCamelCase) == "1": lowercase__ : int = 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: lowercase__ : int = Accelerator( 
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir) else: lowercase__ : List[Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase__ : Any = config["lr"] lowercase__ : List[str] = int(config["num_epochs"]) lowercase__ : List[str] = int(config["seed"]) lowercase__ : int = int(config["batch_size"]) set_seed(_lowerCamelCase) lowercase__ , lowercase__ : Optional[Any] = get_dataloaders(_lowerCamelCase , _lowerCamelCase) lowercase__ : List[str] = evaluate.load("glue" , "mrpc") # If the batch size is too big we use gradient accumulation lowercase__ : Tuple = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: lowercase__ : Optional[int] = batch_size // MAX_GPU_BATCH_SIZE lowercase__ : Union[str, Any] = MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase__ : Tuple = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCamelCase) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). 
lowercase__ : Any = model.to(accelerator.device) # Instantiate optimizer lowercase__ : List[str] = AdamW(params=model.parameters() , lr=_lowerCamelCase) # Instantiate scheduler lowercase__ : Optional[int] = get_linear_schedule_with_warmup( optimizer=_lowerCamelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCamelCase) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[Any] = accelerator.prepare( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase) # New Code # # We need to initialize the trackers we use. Overall configurations can also be stored if args.with_tracking: lowercase__ : Dict = os.path.split(_lowerCamelCase)[-1].split(".")[0] accelerator.init_trackers(_lowerCamelCase , _lowerCamelCase) # Now we train the model for epoch in range(_lowerCamelCase): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: lowercase__ : Optional[Any] = 0 for step, batch in enumerate(_lowerCamelCase): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device) lowercase__ : Dict = model(**_lowerCamelCase) lowercase__ : Tuple = outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() lowercase__ : str = loss / gradient_accumulation_steps accelerator.backward(_lowerCamelCase) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(_lowerCamelCase): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). 
batch.to(accelerator.device) with torch.no_grad(): lowercase__ : Tuple = model(**_lowerCamelCase) lowercase__ : str = outputs.logits.argmax(dim=-1) lowercase__ , lowercase__ : Tuple = accelerator.gather_for_metrics((predictions, batch["labels"])) metric.add_batch( predictions=_lowerCamelCase , references=_lowerCamelCase , ) lowercase__ : Dict = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , _lowerCamelCase) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { "accuracy": eval_metric["accuracy"], "f1": eval_metric["f1"], "train_loss": total_loss.item() / len(_lowerCamelCase), "epoch": epoch, } , step=_lowerCamelCase , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def lowercase_ ( ): lowercase__ : str = argparse.ArgumentParser(description="Simple example of training script.") parser.add_argument( "--mixed_precision" , type=_lowerCamelCase , default=_lowerCamelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU.") parser.add_argument( "--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." 
, ) parser.add_argument( "--project_dir" , type=_lowerCamelCase , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , ) lowercase__ : int = parser.parse_args() lowercase__ : Tuple = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(_lowerCamelCase , _lowerCamelCase) if __name__ == "__main__": main()
87
import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets UpperCamelCase = '''\ @inproceedings{snover-etal-2006-study, title = "A Study of Translation Edit Rate with Targeted Human Annotation", author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John", booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers", month = aug # " 8-12", year = "2006", address = "Cambridge, Massachusetts, USA", publisher = "Association for Machine Translation in the Americas", url = "https://aclanthology.org/2006.amta-papers.25", pages = "223--231", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' UpperCamelCase = '''\ TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu (https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found here: https://github.com/jhclark/tercom. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information. 
''' UpperCamelCase = ''' Produces TER scores alongside the number of edits and reference length. Args: predictions (list of str): The system stream (a sequence of segments). references (list of list of str): A list of one or more reference streams (each a sequence of segments). normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters, as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana. Only applies if `normalized = True`. Defaults to `False`. case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`. Returns: \'score\' (float): TER score (num_edits / sum_ref_lengths * 100) \'num_edits\' (int): The cumulative number of edits \'ref_length\' (float): The cumulative average reference length Examples: Example 1: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... case_sensitive=True) >>> print(results) {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0} Example 2: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... 
references=references, ... case_sensitive=True) >>> print(results) {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0} Example 3: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... normalized=True, ... case_sensitive=True) >>> print(results) {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5} Example 4: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0} Example 5: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... 
case_sensitive=False) >>> print(results) {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class snake_case_ ( datasets.Metric ): def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple: if version.parse(scb.__version__ ) < version.parse("1.4.12" ): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`." ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[ "https://github.com/jhclark/tercom", ] , ) def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , ) -> Any: lowercase__ : Optional[int] = len(references[0] ) if any(len(lowercase_ ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) lowercase__ : Union[str, Any] = [[refs[i] for refs in references] for i in range(lowercase_ )] lowercase__ : str = TER( normalized=lowercase_ , no_punct=lowercase_ , asian_support=lowercase_ , case_sensitive=lowercase_ , ) lowercase__ : List[str] = sb_ter.corpus_score(lowercase_ , lowercase_ ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
87
1
from ...configuration_utils import PretrainedConfig from ...utils import logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = { '''uclanlp/visualbert-vqa''': '''https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json''', '''uclanlp/visualbert-vqa-pre''': '''https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json''', '''uclanlp/visualbert-vqa-coco-pre''': ( '''https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json''' ), '''uclanlp/visualbert-vcr''': '''https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json''', '''uclanlp/visualbert-vcr-pre''': '''https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json''', '''uclanlp/visualbert-vcr-coco-pre''': ( '''https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json''' ), '''uclanlp/visualbert-nlvr2''': '''https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json''', '''uclanlp/visualbert-nlvr2-pre''': '''https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json''', '''uclanlp/visualbert-nlvr2-coco-pre''': ( '''https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json''' ) # See all VisualBERT models at https://huggingface.co/models?filter=visual_bert } class snake_case_ ( __A ): __A : Union[str, Any] = "visual_bert" def __init__( self : Any , lowercase_ : Any=3_05_22 , lowercase_ : List[Any]=7_68 , lowercase_ : List[str]=5_12 , lowercase_ : Dict=12 , lowercase_ : Union[str, Any]=12 , lowercase_ : List[Any]=30_72 , lowercase_ : Dict="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : int=0.1 , lowercase_ : Tuple=5_12 , lowercase_ : int=2 , lowercase_ : List[Any]=0.02 , lowercase_ : List[Any]=1E-12 , lowercase_ : str=False , lowercase_ : Tuple=True , lowercase_ : Optional[Any]=1 , lowercase_ : List[str]=0 , lowercase_ : str=2 , **lowercase_ : Dict , ) -> List[str]: super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , 
eos_token_id=lowercase_ , **lowercase_ ) lowercase__ : Optional[int] = vocab_size lowercase__ : List[str] = max_position_embeddings lowercase__ : List[str] = hidden_size lowercase__ : str = visual_embedding_dim lowercase__ : List[Any] = num_hidden_layers lowercase__ : str = num_attention_heads lowercase__ : Union[str, Any] = intermediate_size lowercase__ : Any = hidden_act lowercase__ : int = hidden_dropout_prob lowercase__ : Any = attention_probs_dropout_prob lowercase__ : Dict = initializer_range lowercase__ : str = type_vocab_size lowercase__ : Any = layer_norm_eps lowercase__ : Tuple = bypass_transformer lowercase__ : Tuple = special_visual_initialize
87
def perfect_cube(n: int) -> bool:
    """Return True if ``n`` is a perfect cube (negative cubes included).

    The original float test ``(n ** (1/3)) ** 3 == n`` is broken: cube roots
    are irrational in binary floating point, so e.g. ``27 ** (1/3)`` is
    3.0000000000000004 and the check reported False for real cubes.  Instead
    we round the float root to a candidate integer and verify with exact
    integer arithmetic, probing the two neighbours to absorb rounding error
    on large inputs.
    """
    m = abs(n)
    root = round(m ** (1 / 3))
    # Exact integer verification; root may be off by one for huge m.
    return any(c >= 0 and c * c * c == m for c in (root - 1, root, root + 1))


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
87
1
# Vigenere cipher over the 26 uppercase letters; case of the message is
# preserved and non-letters pass through unchanged.
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    """Interactively encrypt or decrypt a message with a Vigenere key."""
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")
    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)
    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    """Encrypt ``message`` with the Vigenere cipher using ``key``."""
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    """Decrypt ``message`` with the Vigenere cipher using ``key``."""
    return translate_message(key, message, "decrypt")


def translate_message(key: str, message: str, mode: str) -> str:
    """Shift each letter of ``message`` by the matching key letter.

    NOTE(review): the mangled original defined all four functions under the
    same name ``lowercase_`` while the bodies call ``encrypt_message`` /
    ``decrypt_message`` / ``translate_message`` — every call raised NameError.
    The canonical names are restored here to match the call sites.

    :param key: alphabetic key; cycled over the letters of the message
    :param message: text to transform; non-letters are copied verbatim
    :param mode: "encrypt" to shift forward, "decrypt" to shift backward
    """
    translated = []
    key_index = 0
    key = key.upper()
    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])
            num %= len(LETTERS)
            # Preserve the original letter's case.
            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())
            # Only consume a key letter when a message letter was transformed.
            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)
    return "".join(translated)


if __name__ == "__main__":
    main()
87
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    """Return a copy of ``img`` with its contrast adjusted by ``level``.

    ``level`` is expected in -255..255; 0 leaves the image unchanged.  Uses
    the standard contrast-correction-factor formula and remaps every channel
    value about the midpoint 128.

    Fix: the function was defined under a mangled name while the ``__main__``
    block called ``change_contrast``, and the result was bound to a different
    name than the one saved — both NameErrors at runtime.
    """
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # Scale each channel value about the midpoint (128) by ``factor``.
        return int(128 + factor * (c - 128))

    # Image.point applies ``contrast`` to every channel value of every pixel.
    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
87
1
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV1ImageProcessor


class MobileNetV1ImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters used to build a MobileNetV1 image processor.

    Fix: the original stored every constructor argument in a throwaway local
    instead of on ``self`` (so ``prepare_image_processor_dict`` and the test
    class read attributes that were never set), and the class itself was
    defined under a mangled name while being instantiated by its real name.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
        }


@require_torch
@require_vision
class MobileNetV1ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileNetV1ImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileNetV1ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "crop_size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
87
from __future__ import annotations import sys from collections import deque from typing import Generic, TypeVar UpperCamelCase = TypeVar('''T''') class snake_case_ ( Generic[T] ): __A : deque[T] # Cache store of keys __A : set[T] # References of the keys in cache __A : int = 10 # Maximum capacity of cache def __init__( self : Union[str, Any] , lowercase_ : int ) -> None: lowercase__ : int = deque() lowercase__ : str = set() if not n: lowercase__ : str = sys.maxsize elif n < 0: raise ValueError("n should be an integer greater than 0." ) else: lowercase__ : List[Any] = n def __UpperCamelCase ( self : Dict , lowercase_ : T ) -> None: if x not in self.key_reference: if len(self.dq_store ) == LRUCache._MAX_CAPACITY: lowercase__ : Dict = self.dq_store.pop() self.key_reference.remove(lowercase_ ) else: self.dq_store.remove(lowercase_ ) self.dq_store.appendleft(lowercase_ ) self.key_reference.add(lowercase_ ) def __UpperCamelCase ( self : Dict ) -> None: for k in self.dq_store: print(lowercase_ ) def __repr__( self : Optional[int] ) -> str: return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}''' if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase = LRUCache(4) lru_cache.refer('''A''') lru_cache.refer(2) lru_cache.refer(3) lru_cache.refer('''A''') lru_cache.refer(4) lru_cache.refer(5) lru_cache.display() print(lru_cache) assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
87
1
import math


def perfect_square(num: int) -> bool:
    """Check via floating-point sqrt whether ``num`` is a perfect square.

    Fix: the parameter was declared under a mangled name while the body read
    ``num`` — a NameError on every call.

    NOTE: float rounding can mis-classify very large inputs; prefer
    ``perfect_square_binary_search`` for exact integer arithmetic.
    """
    return math.sqrt(num) * math.sqrt(num) == num


def perfect_square_binary_search(n: int) -> bool:
    """Check whether ``n`` is a perfect square using exact integer binary search.

    Searches 0..n for an integer whose square equals ``n``; O(log n) and
    free of floating-point error.
    """
    left = 0
    right = n
    while left <= right:
        mid = (left + right) // 2
        if mid**2 == n:
            return True
        elif mid**2 > n:
            right = mid - 1
        else:
            left = mid + 1
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
87
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": (
        "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json"
    ),
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    """Configuration class for ConvBERT models.

    Fix: the original stored every hyper-parameter in a throwaway local
    instead of assigning it to ``self``, leaving the config object empty, and
    the class/module-level names were mangled.  Attribute names are restored
    from the values they were assigned from.
    """

    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout


class ConvBertOnnxConfig(OnnxConfig):
    """ONNX export configuration for ConvBERT."""

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Multiple-choice inputs carry an extra "choice" axis.
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
87
1
import os
import re
import shutil
import sys
import tempfile
import unittest

import black


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = """    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states
"""


class CopyCheckTester(unittest.TestCase):
    """Tests for utils/check_copies.py (`# Copied from` consistency tooling).

    Fix: class, method and local names were machine-mangled (every method was
    named identically, locals were never re-read under the name they were
    assigned to); names are restored from the surviving call sites.
    """

    def setUp(self):
        # Work in a sandbox copy of modeling_bert.py so overwrite tests are safe.
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        """Black-format *class_code* into a file and verify copy consistency.

        When ``overwrite_result`` is given, run the checker with overwrite
        enabled and compare the rewritten file to the expected result.
        """
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        mode = black.Mode(target_versions={black.TargetVersion.PY35}, line_length=119)
        code = black.format_str(code, mode=mode)
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w", newline="\n") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(f.name, overwrite=True)
            with open(fname, "r") as f:
                self.assertTrue(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)

    def test_copy_consistency(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

    def test_convert_to_localized_md(self):
        localized_readme = check_copies.LOCALIZED_READMES["README_zh-hans.md"]

        md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"
            " released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"
            " (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"
            " as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"
            " Luong, Quoc V. Le, Christopher D. Manning."
        )
        localized_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        converted_md_list_sample = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."
            " **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"
            " [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"
            " lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"
            " method has been applied to compress GPT2 into"
            " [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"
            " [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"
            " Multilingual BERT into"
            " [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"
            " version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"
            " Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"
            " than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"
            " Christopher D. Manning 发布。\n"
        )

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, localized_md_list, localized_readme["format_model_list"]
        )

        self.assertFalse(num_models_equal)
        self.assertEqual(converted_md_list, converted_md_list_sample)

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            md_list, converted_md_list, localized_readme["format_model_list"]
        )

        # Check whether the number of models is equal to README.md after conversion.
        self.assertTrue(num_models_equal)

        link_changed_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"
            " Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"
            " Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"
            " Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."
        )
        link_unchanged_md_list = (
            "1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"
            " the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )
        converted_md_list_sample = (
            "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"
            " Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"
            " Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"
            " Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"
        )

        num_models_equal, converted_md_list = check_copies.convert_to_localized_md(
            link_changed_md_list, link_unchanged_md_list, localized_readme["format_model_list"]
        )

        # Check if the model link is synchronized.
        self.assertEqual(converted_md_list, converted_md_list_sample)
87
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str) -> None:
    """Convert a TensorFlow BERT checkpoint into a PyTorch ``state_dict`` file.

    Fix: the function and the argparse objects were defined under mangled
    names while being referenced by their real ones
    (``convert_tf_checkpoint_to_pytorch``, ``parser``, ``args``) — NameErrors
    at script startup.  Names restored from the call sites.
    """
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
87
1
import inspect
import os
import sys
import unittest

import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu


class MultiTPUTester(unittest.TestCase):
    """Launches the accelerate smoke-test script on TPU via ``xla_spawn``.

    Fix: ``setUp`` stored the computed paths in throwaway locals instead of
    on ``self``, so ``test_tpu`` read attributes that were never set.
    """

    def setUp(self):
        # Locate the helper training script shipped with accelerate.test_utils.
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        # Directory containing this test file (where xla_spawn.py lives).
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
87
"""Testing utilities: env-flag parsing, skip decorators, offline simulation and subprocess helpers.

NOTE(review): identifiers in this chunk appear machine-mangled — every helper is
named ``lowercase_`` (each definition shadowing the previous one), module
constants are all bound to ``UpperCamelCase``, and several bodies read names
(``key``, ``default``, ``test_case``, ``decorators``, ``mode``, ``func``,
``p``, ``result``, ...) that were never bound under those names.  The code is
reproduced unchanged below; restore the original names before relying on it.
"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch

import pyarrow as pa
import pytest
import requests
from packaging import version

from datasets import config


if config.PY_VERSION < version.parse('''3.8'''):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


# Parse a boolean flag from an environment variable (presumably
# ``parse_flag_from_env`` — the calls below use that name).
def lowercase_ ( _lowerCamelCase : Any , _lowerCamelCase : List[str]=False):
    try:
        lowercase__ : Union[str, Any] = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        lowercase__ : int = default
    else:
        # KEY is set, convert it to True or False.
        try:
            lowercase__ : Optional[int] = strtobool(_lowerCamelCase)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f'''If set, {key} must be yes or no.''')
    return _value


# Global run-mode flags read by the skip decorators further down.
UpperCamelCase = parse_flag_from_env('''RUN_SLOW''', default=False)
UpperCamelCase = parse_flag_from_env('''RUN_REMOTE''', default=False)
UpperCamelCase = parse_flag_from_env('''RUN_LOCAL''', default=True)
UpperCamelCase = parse_flag_from_env('''RUN_PACKAGED''', default=True)

# Compression
UpperCamelCase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
UpperCamelCase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
UpperCamelCase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')

# Audio
UpperCamelCase = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec('''soundfile''') is None
    or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
    reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)

# Beam
UpperCamelCase = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
    reason='''test requires apache-beam and a compatible dill version''',
)

# Dill-cloudpickle compatibility
UpperCamelCase = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse('''0.3.2'''),
    reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)

# Windows
UpperCamelCase = pytest.mark.skipif(
    sys.platform == '''win32''',
    reason='''test should not be run on Windows''',
)


# require_* decorators: each skips the wrapped test when an optional
# dependency is missing (faiss, regex, elasticsearch, sqlalchemy, torch,
# tensorflow, jax, Pillow, ...).
def lowercase_ ( _lowerCamelCase : int):
    try:
        import faiss  # noqa
    except ImportError:
        lowercase__ : Optional[Any] = unittest.skip("test requires faiss")(_lowerCamelCase)
    return test_case


def lowercase_ ( _lowerCamelCase : int):
    try:
        import regex  # noqa
    except ImportError:
        lowercase__ : List[Any] = unittest.skip("test requires regex")(_lowerCamelCase)
    return test_case


def lowercase_ ( _lowerCamelCase : int):
    try:
        import elasticsearch  # noqa
    except ImportError:
        lowercase__ : Optional[int] = unittest.skip("test requires elasticsearch")(_lowerCamelCase)
    return test_case


def lowercase_ ( _lowerCamelCase : Union[str, Any]):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        lowercase__ : Optional[int] = unittest.skip("test requires sqlalchemy")(_lowerCamelCase)
    return test_case


def lowercase_ ( _lowerCamelCase : int):
    if not config.TORCH_AVAILABLE:
        lowercase__ : Tuple = unittest.skip("test requires PyTorch")(_lowerCamelCase)
    return test_case


def lowercase_ ( _lowerCamelCase : Tuple):
    if not config.TF_AVAILABLE:
        lowercase__ : Any = unittest.skip("test requires TensorFlow")(_lowerCamelCase)
    return test_case


def lowercase_ ( _lowerCamelCase : Dict):
    if not config.JAX_AVAILABLE:
        lowercase__ : List[str] = unittest.skip("test requires JAX")(_lowerCamelCase)
    return test_case


def lowercase_ ( _lowerCamelCase : int):
    if not config.PIL_AVAILABLE:
        lowercase__ : Dict = unittest.skip("test requires Pillow")(_lowerCamelCase)
    return test_case


def lowercase_ ( _lowerCamelCase : Tuple):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(_lowerCamelCase)
    else:
        return test_case


def lowercase_ ( _lowerCamelCase : Optional[Any]):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(_lowerCamelCase)
    else:
        return test_case


def lowercase_ ( _lowerCamelCase : Dict):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(_lowerCamelCase)
    else:
        return test_case


# Decorator factory: skip unless the given spaCy model can be loaded.
def lowercase_ ( _lowerCamelCase : Optional[int]):
    def _require_spacy_model(_lowerCamelCase : Optional[int]):
        try:
            import spacy  # noqa F401

            spacy.load(_lowerCamelCase)
        except ImportError:
            return unittest.skip("test requires spacy")(_lowerCamelCase)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(_lowerCamelCase))(_lowerCamelCase)
        else:
            return test_case

    return _require_spacy_model


def lowercase_ ( _lowerCamelCase : Dict):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(_lowerCamelCase)
    else:
        return test_case


def lowercase_ ( _lowerCamelCase : List[str]):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(_lowerCamelCase)
    else:
        return test_case


# Run-mode gates: skip depending on the RUN_SLOW / RUN_LOCAL / RUN_PACKAGED /
# RUN_REMOTE environment flags parsed above.
def lowercase_ ( _lowerCamelCase : Dict):
    if not _run_slow_tests or _run_slow_tests == 0:
        lowercase__ : Tuple = unittest.skip("test is slow")(_lowerCamelCase)
    return test_case


def lowercase_ ( _lowerCamelCase : int):
    if not _run_local_tests or _run_local_tests == 0:
        lowercase__ : str = unittest.skip("test is local")(_lowerCamelCase)
    return test_case


def lowercase_ ( _lowerCamelCase : Optional[int]):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        lowercase__ : List[Any] = unittest.skip("test is packaged")(_lowerCamelCase)
    return test_case


def lowercase_ ( _lowerCamelCase : Tuple):
    if not _run_remote_tests or _run_remote_tests == 0:
        lowercase__ : Union[str, Any] = unittest.skip("test requires remote")(_lowerCamelCase)
    return test_case


# Class decorator: apply the given decorators to every test_* method.
def lowercase_ ( *_lowerCamelCase : str):
    def decorate(cls : str):
        for name, fn in cls.__dict__.items():
            if callable(_lowerCamelCase) and name.startswith("test"):
                for decorator in decorators:
                    lowercase__ : Optional[int] = decorator(_lowerCamelCase)
                setattr(cls , _lowerCamelCase , _lowerCamelCase)
        return cls

    return decorate


# Presumably RequestWouldHangIndefinitelyError — raised by timeout_request
# below; verify the original name against callers.
class snake_case_ ( __A ):
    pass


# Presumably the OfflineSimulationMode enum (CONNECTION_FAILS /
# CONNECTION_TIMES_OUT / HF_DATASETS_OFFLINE_SET_TO_1, per the usage below).
class snake_case_ ( __A ):
    __A : List[Any] = 0
    __A : str = 1
    __A : int = 2


# Context manager simulating an offline environment in one of three modes.
@contextmanager
def lowercase_ ( _lowerCamelCase : List[str]=OfflineSimulationMode.CONNECTION_FAILS , _lowerCamelCase : int=1E-16):
    lowercase__ : int = requests.Session().request

    def timeout_request(_lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Dict , **_lowerCamelCase : str):
        # Change the url to an invalid url so that the connection hangs
        lowercase__ : Any = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''')
        lowercase__ : Dict = timeout
        try:
            return online_request(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            lowercase__ : Dict = url
            lowercase__ : Union[str, Any] = e.args[0]
            lowercase__ : Optional[Any] = (max_retry_error.args[0].replace("10.255.255.1" , f'''OfflineMock[{url}]'''),)
            lowercase__ : int = (max_retry_error,)
            raise

    def raise_connection_error(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] , **_lowerCamelCase : Tuple):
        raise requests.ConnectionError("Offline mode is enabled." , request=_lowerCamelCase)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send" , _lowerCamelCase):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request" , _lowerCamelCase):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")


# Context manager: run the body inside a temporary working directory.
@contextmanager
def lowercase_ ( *_lowerCamelCase : str , **_lowerCamelCase : Tuple):
    lowercase__ : Dict = str(Path().resolve())
    with tempfile.TemporaryDirectory(*_lowerCamelCase , **_lowerCamelCase) as tmp_dir:
        try:
            os.chdir(_lowerCamelCase)
            yield
        finally:
            os.chdir(_lowerCamelCase)


# Context manager asserting that Arrow memory usage grew inside the body.
@contextmanager
def lowercase_ ( ):
    import gc

    gc.collect()
    lowercase__ : Union[str, Any] = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


# Context manager asserting that Arrow memory usage did NOT grow.
@contextmanager
def lowercase_ ( ):
    import gc

    gc.collect()
    lowercase__ : int = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."


# Compare two RNG states by drawing the same sample from deep copies.
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]):
    return deepcopy(_lowerCamelCase).integers(0 , 100 , 10).tolist() == deepcopy(_lowerCamelCase).integers(0 , 100 , 10).tolist()


# Decorator: xfail the test when the Hub answers with HTTP 500/502.
def lowercase_ ( _lowerCamelCase : str):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(_lowerCamelCase : str , *_lowerCamelCase : Dict , **_lowerCamelCase : Dict):
        try:
            return func(*_lowerCamelCase , **_lowerCamelCase)
        except HTTPError as err:
            if str(_lowerCamelCase).startswith("500") or str(_lowerCamelCase).startswith("502"):
                pytest.xfail(str(_lowerCamelCase))
            raise err

    return decorator.decorator(_wrapper , _lowerCamelCase)


# Result holder for a finished subprocess (returncode, stdout, stderr);
# presumably the ``_RunOutput`` referenced below.
class snake_case_ :
    def __init__( self : int , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : List[str] ) -> List[str]:
        lowercase__ : Tuple = returncode
        lowercase__ : int = stdout
        lowercase__ : Union[str, Any] = stderr


# Read an async stream line by line and feed each line to ``callback``.
async def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Dict):
    while True:
        lowercase__ : Optional[int] = await stream.readline()
        if line:
            callback(_lowerCamelCase)
        else:
            break


# Spawn a subprocess and tee its stdout/stderr while it runs
# (presumably ``_stream_subprocess``, per the caller below).
async def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : int=None , _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : Tuple=False):
    if echo:
        print("\nRunning: " , " ".join(_lowerCamelCase))

    lowercase__ : Optional[int] = await asyncio.create_subprocess_exec(
        cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    lowercase__ : str = []
    lowercase__ : List[str] = []

    def tee(_lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int]=""):
        lowercase__ : Optional[int] = line.decode("utf-8").rstrip()
        sink.append(_lowerCamelCase)
        if not quiet:
            print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label="stdout:")),
            _read_stream(p.stderr , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label="stderr:")),
        ] , timeout=_lowerCamelCase , )
    return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase)


# Synchronous wrapper: run a command, raise on non-zero exit or empty output.
def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str]=None , _lowerCamelCase : Dict=None , _lowerCamelCase : int=180 , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Optional[Any]=True):
    lowercase__ : Any = asyncio.get_event_loop()
    lowercase__ : Tuple = loop.run_until_complete(
        _stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase))

    lowercase__ : int = " ".join(_lowerCamelCase)
    if result.returncode > 0:
        lowercase__ : Any = "\n".join(result.stderr)
        raise RuntimeError(
            f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
            f'''The combined stderr from workers follows:\n{stderr}''')

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f'''\'{cmd_str}\' produced no output.''')

    return result


def lowercase_ ( ):
    lowercase__ : List[str] = os.environ.get("PYTEST_XDIST_WORKER" , "gw0")
    lowercase__ : str = re.sub(R"^gw" , "" , _lowerCamelCase ,
0 , re.M) return int(_lowerCamelCase) def lowercase_ ( ): lowercase__ : Union[str, Any] = 2_9500 lowercase__ : Optional[int] = pytest_xdist_worker_id() return port + uniq_delta
87
1
import os
from shutil import copyfile
from typing import List, Optional, Tuple

from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging


if is_sentencepiece_available():
    from .tokenization_albert import AlbertTokenizer
else:
    AlbertTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

# Every released ALBERT checkpoint shares the same URL layout, so the per-file
# maps are generated instead of spelled out eight times by hand.
_ALBERT_MODELS = (
    "albert-base-v1",
    "albert-large-v1",
    "albert-xlarge-v1",
    "albert-xxlarge-v1",
    "albert-base-v2",
    "albert-large-v2",
    "albert-xlarge-v2",
    "albert-xxlarge-v2",
)

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {m: f"https://huggingface.co/{m}/resolve/main/spiece.model" for m in _ALBERT_MODELS},
    "tokenizer_file": {m: f"https://huggingface.co/{m}/resolve/main/tokenizer.json" for m in _ALBERT_MODELS},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {m: 512 for m in _ALBERT_MODELS}

SPIECE_UNDERLINE = "▁"


class AlbertTokenizerFast(PreTrainedTokenizerFast):
    """Fast (tokenizers-backed) ALBERT tokenizer based on SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )

        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Saving the slow vocabulary requires the original SentencePiece file.
        self.can_save_slow_tokenizer = bool(self.vocab_file)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Add specials: ``[CLS] A [SEP]`` for one sequence, ``[CLS] A [SEP] B [SEP]`` for a pair."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Segment ids: zeros for the first sequence (incl. specials), ones for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Copy the SentencePiece model file into ``save_directory``.

        Raises ValueError when the tokenizer was loaded without the slow
        vocabulary file and therefore cannot produce one.
        """
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
87
import argparse
import json
from collections import OrderedDict

import torch
from huggingface_hub import cached_download, hf_hub_url

from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification


def embeddings(idx):
    """Rename table (HF name, original name) for the patch-embedding layer of stage ``idx``."""
    src = f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings"
    dst = f"stage{idx}.patch_embed"
    return [
        (f"{src}.projection.weight", f"{dst}.proj.weight"),
        (f"{src}.projection.bias", f"{dst}.proj.bias"),
        (f"{src}.normalization.weight", f"{dst}.norm.weight"),
        (f"{src}.normalization.bias", f"{dst}.norm.bias"),
    ]


def attention(idx, cnt):
    """Rename table for attention block ``cnt`` of stage ``idx``.

    Order matters only for the debug printout; the mapping itself is a flat
    (HF name, original name) association.
    """
    src = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    dst = f"stage{idx}.blocks.{cnt}"
    table = []

    # Convolutional projections for query / key / value, each with its
    # conv kernel and BatchNorm parameters/statistics.
    for long_name, short_name in (("query", "q"), ("key", "k"), ("value", "v")):
        conv_src = f"{src}.attention.attention.convolution_projection_{long_name}.convolution_projection"
        conv_dst = f"{dst}.attn.conv_proj_{short_name}"
        table.append((f"{conv_src}.convolution.weight", f"{conv_dst}.conv.weight"))
        for param in ("weight", "bias", "running_mean", "running_var", "num_batches_tracked"):
            table.append((f"{conv_src}.normalization.{param}", f"{conv_dst}.bn.{param}"))

    # Linear projections for query / key / value.
    for long_name, short_name in (("query", "q"), ("key", "k"), ("value", "v")):
        for param in ("weight", "bias"):
            table.append(
                (
                    f"{src}.attention.attention.projection_{long_name}.{param}",
                    f"{dst}.attn.proj_{short_name}.{param}",
                )
            )

    # Attention output projection, MLP and the two layer norms.
    for src_mod, dst_mod in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for param in ("weight", "bias"):
            table.append((f"{src}.{src_mod}.{param}", f"{dst}.{dst_mod}.{param}"))

    return table


def cls_token(idx):
    """Rename table for the cls token of stage ``idx`` (original checkpoints keep it in stage2)."""
    return [(f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token")]


def final():
    """Rename table for the final layer norm and the classification head."""
    return [
        ("layernorm.weight", "norm.weight"),
        ("layernorm.bias", "norm.bias"),
        ("classifier.weight", "head.weight"),
        ("classifier.bias", "head.bias"),
    ]


def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert an original CvT checkpoint into the HF ``CvtForImageClassification`` format.

    Downloads the ImageNet-1k label map, builds a config matching the
    checkpoint's depth (read from its name), renames every weight via the
    tables above, and saves model + image processor.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"

    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # Depth is encoded in the checkpoint name, e.g. "cvt-13" / "cvt-21" / "cvt-w24".
    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 + 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    # NOTE(review): the attribute originally receiving `image_size` was lost in
    # transit; setting the processor's resize edge matches the upstream script —
    # confirm before shipping.
    image_processor.size["shortest_edge"] = image_size

    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)


# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
87
1
import gc
import unittest

import numpy as np
import torch

from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu

from ..pipeline_params import (
    CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
    CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU smoke tests for the class-conditional DiT image-generation pipeline."""

    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
    # NOTE(review): this flag's original name was lost in transit; it disables
    # one of the mixin's optional checks — confirm the attribute name upstream.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build a tiny transformer/VAE/scheduler trio sized for fast CPU tests."""
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline inputs; mps needs a global-seed generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs

    def test_inference(self):
        device = "cpu"

        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)


@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    """Slow GPU tests against reference outputs of the released DiT checkpoints."""

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_dit_256(self):
        generator = torch.manual_seed(0)

        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")

        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)

        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-2

    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")

        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)

        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images

        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy"
            )
            assert np.abs((expected_image - image).max()) < 1e-1
87
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


# Lazy-import structure: map submodule name -> public names it exports.
# Optional backends are only added when their dependency is installed.
_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy backends load on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
1
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    """Configuration for Multilingual-CLIP: an XLM-R text tower plus a linear projection head."""

    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        # These attribute names must match what `MultilingualCLIP.__init__`
        # reads below (`transformerDimensions` / `numDims`).
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    """XLM-R text encoder whose pooled output is projected into the CLIP image-embedding space."""

    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        # Mean-pool token embeddings over non-padding positions, then project;
        # also return the raw per-token embeddings.
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(pooled), embs
87
import json import os import unittest from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers, require_torch from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case_ ( __A ,unittest.TestCase ): __A : Union[str, Any] = LEDTokenizer __A : Union[str, Any] = LEDTokenizerFast __A : Optional[Any] = True def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]: super().setUp() lowercase__ : List[str] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "\u0120", "\u0120l", "\u0120n", "\u0120lo", "\u0120low", "er", "\u0120lowest", "\u0120newer", "\u0120wider", "<unk>", ] lowercase__ : Optional[int] = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) ) lowercase__ : str = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""] lowercase__ : Tuple = {"unk_token": "<unk>"} lowercase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowercase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowercase_ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowercase_ ) ) def __UpperCamelCase ( self : int , **lowercase_ : str ) -> List[Any]: kwargs.update(self.special_tokens_map ) return self.tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ ) def __UpperCamelCase ( self : List[Any] , **lowercase_ : Any ) -> List[Any]: kwargs.update(self.special_tokens_map ) return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **lowercase_ ) def __UpperCamelCase ( self : str , lowercase_ : Any ) -> Tuple: return "lower newer", "lower newer" @cached_property def __UpperCamelCase ( self : Tuple ) -> Optional[Any]: return 
LEDTokenizer.from_pretrained("allenai/led-base-16384" ) @cached_property def __UpperCamelCase ( self : Tuple ) -> int: return LEDTokenizerFast.from_pretrained("allenai/led-base-16384" ) @require_torch def __UpperCamelCase ( self : int ) -> List[Any]: lowercase__ : Union[str, Any] = ["A long paragraph for summarization.", "Another paragraph for summarization."] lowercase__ : str = [0, 2_50, 2_51, 1_78_18, 13, 3_91_86, 19_38, 4, 2] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : Dict = tokenizer(lowercase_ , max_length=len(lowercase_ ) , padding=lowercase_ , return_tensors="pt" ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertEqual((2, 9) , batch.input_ids.shape ) self.assertEqual((2, 9) , batch.attention_mask.shape ) lowercase__ : Union[str, Any] = batch.input_ids.tolist()[0] self.assertListEqual(lowercase_ , lowercase_ ) @require_torch def __UpperCamelCase ( self : List[str] ) -> Tuple: lowercase__ : Dict = ["A long paragraph for summarization.", "Another paragraph for summarization."] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : Optional[int] = tokenizer(lowercase_ , padding=lowercase_ , return_tensors="pt" ) self.assertIn("input_ids" , lowercase_ ) self.assertIn("attention_mask" , lowercase_ ) self.assertNotIn("labels" , lowercase_ ) self.assertNotIn("decoder_attention_mask" , lowercase_ ) @require_torch def __UpperCamelCase ( self : Optional[Any] ) -> Any: lowercase__ : Dict = [ "Summary of the text.", "Another summary.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : Dict = tokenizer(text_target=lowercase_ , max_length=32 , padding="max_length" , return_tensors="pt" ) self.assertEqual(32 , targets["input_ids"].shape[1] ) @require_torch def __UpperCamelCase ( self : Optional[int] ) -> Tuple: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : int = tokenizer( ["I am a small frog" * 10_24, "I am a 
small frog"] , padding=lowercase_ , truncation=lowercase_ , return_tensors="pt" ) self.assertIsInstance(lowercase_ , lowercase_ ) self.assertEqual(batch.input_ids.shape , (2, 51_22) ) @require_torch def __UpperCamelCase ( self : List[str] ) -> Any: lowercase__ : Union[str, Any] = ["A long paragraph for summarization."] lowercase__ : List[Any] = [ "Summary of the text.", ] for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : List[Any] = tokenizer(lowercase_ , return_tensors="pt" ) lowercase__ : Dict = tokenizer(text_target=lowercase_ , return_tensors="pt" ) lowercase__ : Optional[int] = inputs["input_ids"] lowercase__ : str = targets["input_ids"] self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() ) self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() ) self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() ) @require_torch def __UpperCamelCase ( self : Union[str, Any] ) -> Dict: for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]: lowercase__ : int = ["Summary of the text.", "Another summary."] lowercase__ : List[str] = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]] lowercase__ : Tuple = tokenizer(lowercase_ , padding=lowercase_ ) lowercase__ : int = [[0] * len(lowercase_ ) for x in encoded_output["input_ids"]] lowercase__ : Any = tokenizer.pad(lowercase_ ) self.assertSequenceEqual(outputs["global_attention_mask"] , lowercase_ ) def __UpperCamelCase ( self : int ) -> Union[str, Any]: pass def __UpperCamelCase ( self : int ) -> Optional[Any]: for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest(F'''{tokenizer.__class__.__name__} ({pretrained_name})''' ): lowercase__ : List[Any] = self.rust_tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) lowercase__ : List[str] = self.tokenizer_class.from_pretrained(lowercase_ , **lowercase_ ) lowercase__ : 
List[Any] = "A, <mask> AllenNLP sentence." lowercase__ : Tuple = tokenizer_r.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ ) lowercase__ : List[str] = tokenizer_p.encode_plus(lowercase_ , add_special_tokens=lowercase_ , return_token_type_ids=lowercase_ ) self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) ) self.assertEqual( sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , ) lowercase__ : Any = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] ) lowercase__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] ) self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] ) self.assertSequenceEqual( lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] ) self.assertSequenceEqual( lowercase_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
87
1
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse

from .config import config_command_parser
from .config_args import default_config_file, load_config_from_file  # noqa: F401
from .default import default_command_parser
from .update import update_command_parser


def get_config_parser(subparsers=None):
    """Build and return the `accelerate config` argument parser.

    Args:
        subparsers: Optional parent subparsers collection to register the
            config command under (passed through to ``config_command_parser``).

    Returns:
        The configured ``argparse.ArgumentParser`` with ``default`` and
        ``update`` subcommands attached.
    """
    # add_help=False so child parsers can define their own -h/--help.
    parent_parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
    # The main config parser
    config_parser = config_command_parser(subparsers)
    # The subparser to add commands to
    subcommands = config_parser.add_subparsers(title="subcommands", dest="subcommand")

    # Then add other parsers with the parent parser
    default_command_parser(subcommands, parents=[parent_parser])
    update_command_parser(subcommands, parents=[parent_parser])

    return config_parser


def main():
    """CLI entry point: parse arguments and dispatch to the chosen subcommand."""
    config_parser = get_config_parser()
    args = config_parser.parse_args()

    # Subcommands attach their handler as `func`; absence means no (valid)
    # subcommand was given, so show usage and exit with an error status.
    if not hasattr(args, "func"):
        config_parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()
87
import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCamelCase = 256 class snake_case_ ( __A ): __A : str = ["melgan"] def __init__( self : str , lowercase_ : SpectrogramNotesEncoder , lowercase_ : SpectrogramContEncoder , lowercase_ : TaFilmDecoder , lowercase_ : DDPMScheduler , lowercase_ : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None: super().__init__() # From MELGAN lowercase__ : List[Any] = math.log(1E-5 ) # Matches MelGAN training. lowercase__ : str = 4.0 # Largest value for most examples lowercase__ : Any = 1_28 self.register_modules( notes_encoder=lowercase_ , continuous_encoder=lowercase_ , decoder=lowercase_ , scheduler=lowercase_ , melgan=lowercase_ , ) def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : Dict=False ) -> Optional[Any]: lowercase__ , lowercase__ : int = output_range if clip: lowercase__ : Optional[Any] = torch.clip(lowercase_ , self.min_value , self.max_value ) # Scale to [0, 1]. lowercase__ : List[str] = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. 
return zero_one * (max_out - min_out) + min_out def __UpperCamelCase ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : List[Any]=False ) -> Union[str, Any]: lowercase__ , lowercase__ : Tuple = input_range lowercase__ : Optional[Any] = torch.clip(lowercase_ , lowercase_ , lowercase_ ) if clip else outputs # Scale to [0, 1]. lowercase__ : Union[str, Any] = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. return zero_one * (self.max_value - self.min_value) + self.min_value def __UpperCamelCase ( self : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> List[str]: lowercase__ : Optional[Any] = input_tokens > 0 lowercase__ , lowercase__ : int = self.notes_encoder( encoder_input_tokens=lowercase_ , encoder_inputs_mask=lowercase_ ) lowercase__ , lowercase__ : List[Any] = self.continuous_encoder( encoder_inputs=lowercase_ , encoder_inputs_mask=lowercase_ ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str ) -> Tuple: lowercase__ : Union[str, Any] = noise_time if not torch.is_tensor(lowercase_ ): lowercase__ : Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(lowercase_ ) and len(timesteps.shape ) == 0: lowercase__ : Optional[Any] = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowercase__ : int = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) lowercase__ : str = self.decoder( encodings_and_masks=lowercase_ , decoder_input_tokens=lowercase_ , decoder_noise_time=lowercase_ ) return logits @torch.no_grad() def __call__( self : List[str] , lowercase_ : List[List[int]] , lowercase_ : Optional[torch.Generator] = None , lowercase_ : 
int = 1_00 , lowercase_ : bool = True , lowercase_ : str = "numpy" , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]: if (callback_steps is None) or ( callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0) ): raise ValueError( F'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' F''' {type(lowercase_ )}.''' ) lowercase__ : str = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) lowercase__ : Optional[int] = np.zeros([1, 0, self.n_dims] , np.floataa ) lowercase__ : str = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device ) for i, encoder_input_tokens in enumerate(lowercase_ ): if i == 0: lowercase__ : Union[str, Any] = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. lowercase__ : List[str] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
lowercase__ : str = ones lowercase__ : str = self.scale_features( lowercase_ , output_range=[-1.0, 1.0] , clip=lowercase_ ) lowercase__ : str = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=lowercase_ , continuous_mask=lowercase_ , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop lowercase__ : List[str] = randn_tensor( shape=encoder_continuous_inputs.shape , generator=lowercase_ , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(lowercase_ ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowercase__ : Optional[int] = self.decode( encodings_and_masks=lowercase_ , input_tokens=lowercase_ , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 lowercase__ : Optional[Any] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample lowercase__ : Tuple = self.scale_to_features(lowercase_ , input_range=[-1.0, 1.0] ) lowercase__ : List[str] = mel[:1] lowercase__ : Optional[int] = mel.cpu().float().numpy() lowercase__ : str = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(lowercase_ , lowercase_ ) logger.info("Generated segment" , lowercase_ ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." ) elif output_type == "numpy" and self.melgan is None: raise ValueError( "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." 
) if output_type == "numpy": lowercase__ : Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: lowercase__ : Dict = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=lowercase_ )
87
1
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of ``function`` using the secant method, seeded at x0 and x1.

    Iterates x_{n+1} = x_n - f(x_n) / ((f(x_n) - f(x_{n-1})) / (x_n - x_{n-1}))
    until two successive estimates differ by less than 1e-5.

    Args:
        function: Continuous real function whose root is sought.
        x0: First initial guess.
        x1: Second initial guess (must differ from ``x0``).

    Returns:
        An approximation of the root.

    Raises:
        ZeroDivisionError: If the two current iterates coincide or the secant
            line is horizontal (f values equal), so no new estimate exists.
    """
    x_n: float = x0
    x_n1: float = x1
    while True:
        # Guard against a zero denominator in the secant update below.
        if x_n == x_n1 or function(x_n) == function(x_n1):
            raise ZeroDivisionError("float division by zero, could not find root")
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        # Converged when consecutive estimates are within the tolerance.
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    """Example polynomial x**3 - 2x - 5; has a real root near 2.0945515."""
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))
87
import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class ZeroShotAudioClassificationPipelineTests(unittest.TestCase):
    """Pipeline tests for zero-shot audio classification (CLAP-style models).

    NOTE: the methods must be named ``test_*`` and have distinct names —
    otherwise later definitions silently overwrite earlier ones and unittest
    collects nothing.
    """

    @require_torch
    def test_small_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        # Tiny random model: scores are near-uniform but deterministic.
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        # Batched input (list of audios) returns one result list per audio.
        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        # batch_size must not change the results, only how they are computed.
        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
87
1
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Import structure consumed by _LazyModule: maps submodule name -> the public
# names it exposes.  The torch-only entry must be ADDED as a key, not assigned
# over the whole dict, and the dict must keep the name ``_import_structure``
# that the _LazyModule call below refers to.
_import_structure = {"configuration_timm_backbone": ["TimmBackboneConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_timm_backbone"] = ["TimmBackbone"]


if TYPE_CHECKING:
    # Real imports only for static type checkers; at runtime the lazy module
    # below resolves attributes on first access instead.
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    # Replace this module in sys.modules with a lazy proxy so heavy submodules
    # are only imported when their attributes are first used.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
import operator def lowercase_ ( _lowerCamelCase : list , _lowerCamelCase : bool = False , _lowerCamelCase : list | None = None): lowercase__ : int = operator.lt if reverse else operator.gt lowercase__ : str = solution or [] if not arr: return solution lowercase__ : List[str] = [arr.pop(0)] for i, item in enumerate(_lowerCamelCase): if _operator(_lowerCamelCase , sublist[-1]): sublist.append(_lowerCamelCase) arr.pop(_lowerCamelCase) # merging sublist into solution list if not solution: solution.extend(_lowerCamelCase) else: while sublist: lowercase__ : str = sublist.pop(0) for i, xx in enumerate(_lowerCamelCase): if not _operator(_lowerCamelCase , _lowerCamelCase): solution.insert(_lowerCamelCase , _lowerCamelCase) break else: solution.append(_lowerCamelCase) strand_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
87
1
import copy from typing import Any, Dict, List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import SequenceFeatureExtractor from ...feature_extraction_utils import BatchFeature from ...utils import TensorType, logging UpperCamelCase = logging.get_logger(__name__) class snake_case_ ( __A ): __A : List[str] = ["input_features"] def __init__( self : List[str] , lowercase_ : List[Any]=80 , lowercase_ : List[str]=1_60_00 , lowercase_ : Optional[int]=1_60 , lowercase_ : Tuple=30 , lowercase_ : Optional[Any]=4_00 , lowercase_ : str=0.0 , lowercase_ : Any=False , **lowercase_ : Tuple , ) -> Union[str, Any]: super().__init__( feature_size=lowercase_ , sampling_rate=lowercase_ , padding_value=lowercase_ , return_attention_mask=lowercase_ , **lowercase_ , ) lowercase__ : Tuple = n_fft lowercase__ : int = hop_length lowercase__ : Tuple = chunk_length lowercase__ : List[Any] = chunk_length * sampling_rate lowercase__ : Union[str, Any] = self.n_samples // hop_length lowercase__ : Tuple = sampling_rate lowercase__ : List[str] = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=lowercase_ , min_frequency=0.0 , max_frequency=80_00.0 , sampling_rate=lowercase_ , norm="slaney" , mel_scale="slaney" , ) def __UpperCamelCase ( self : List[str] , lowercase_ : np.array ) -> np.ndarray: lowercase__ : Tuple = spectrogram( lowercase_ , window_function(self.n_fft , "hann" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel="log10" , ) lowercase__ : int = log_spec[:, :-1] lowercase__ : int = np.maximum(lowercase_ , log_spec.max() - 8.0 ) lowercase__ : List[Any] = (log_spec + 4.0) / 4.0 return log_spec @staticmethod # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm def __UpperCamelCase ( lowercase_ : List[np.ndarray] , lowercase_ : 
List[np.ndarray] , lowercase_ : float = 0.0 ) -> List[np.ndarray]: if attention_mask is not None: lowercase__ : int = np.array(lowercase_ , np.intaa ) lowercase__ : List[Any] = [] for vector, length in zip(lowercase_ , attention_mask.sum(-1 ) ): lowercase__ : Optional[Any] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 ) if length < normed_slice.shape[0]: lowercase__ : Union[str, Any] = padding_value normed_input_values.append(lowercase_ ) else: lowercase__ : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values] return normed_input_values def __call__( self : int , lowercase_ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , lowercase_ : bool = True , lowercase_ : Optional[int] = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[str] = "max_length" , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , **lowercase_ : Optional[int] , ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( F'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a''' F''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input''' F''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( "It is strongly recommended to pass the `sampling_rate` argument to this function. " "Failing to do so can result in silent errors that might be hard to debug." 
) lowercase__ : List[Any] = isinstance(lowercase_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) lowercase__ : Union[str, Any] = is_batched_numpy or ( isinstance(lowercase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: lowercase__ : Any = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(lowercase_ , np.ndarray ): lowercase__ : int = np.asarray(lowercase_ , dtype=np.floataa ) elif isinstance(lowercase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): lowercase__ : str = raw_speech.astype(np.floataa ) # always return batch if not is_batched: lowercase__ : Dict = [np.asarray([raw_speech] ).T] lowercase__ : Union[str, Any] = BatchFeature({"input_features": raw_speech} ) # convert into correct format for padding lowercase__ : Any = self.pad( lowercase_ , padding=lowercase_ , max_length=max_length if max_length else self.n_samples , truncation=lowercase_ , pad_to_multiple_of=lowercase_ , return_attention_mask=return_attention_mask or do_normalize , ) # zero-mean and unit-variance normalization if do_normalize: lowercase__ : str = self.zero_mean_unit_var_norm( padded_inputs["input_features"] , attention_mask=padded_inputs["attention_mask"] , padding_value=self.padding_value , ) lowercase__ : Dict = np.stack(padded_inputs["input_features"] , axis=0 ) # make sure list is in array format lowercase__ : str = padded_inputs.get("input_features" ).transpose(2 , 0 , 1 ) lowercase__ : List[str] = [self._np_extract_fbank_features(lowercase_ ) for waveform in input_features[0]] if isinstance(input_features[0] , lowercase_ ): lowercase__ : List[Any] = [np.asarray(lowercase_ , dtype=np.floataa ) for feature in input_features] else: lowercase__ : Tuple = input_features if return_attention_mask: # rescale from sample (48000) to 
feature (3000) lowercase__ : List[Any] = padded_inputs["attention_mask"][:, :: self.hop_length] if return_tensors is not None: lowercase__ : Dict = padded_inputs.convert_to_tensors(lowercase_ ) return padded_inputs def __UpperCamelCase ( self : List[str] ) -> Dict[str, Any]: lowercase__ : Optional[Any] = copy.deepcopy(self.__dict__ ) lowercase__ : Union[str, Any] = self.__class__.__name__ if "mel_filters" in output: del output["mel_filters"] return output
87
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging


logger = logging.get_logger(__name__)


STOPPING_CRITERIA_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.

"""


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    """Stop once the generated sequence reaches `max_length` tokens (prompt included)."""

    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        # Warn (once) when generation is about to run past the model's
        # positional-embedding capacity, which usually degrades output quality.
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    """Deprecated: stop after `max_new_tokens` tokens beyond `start_length`."""

    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    """Stop once generation has run for more than `max_time` seconds."""

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        # Clock starts at construction unless an explicit timestamp is given.
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    """A list of criteria; generation stops when ANY of them fires."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        """Return the max length enforced by the first length-based criterion, if any."""
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    """Return a copy of `stopping_criteria` guaranteed to enforce `max_length`.

    Warns if the list already enforces a different max length; appends a
    `MaxLengthCriteria` if it enforces none. The input list is not mutated.
    """
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
87
1
from math import gcd
from typing import Optional


def lowercase_(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> Optional[int]:
    """Find a nontrivial divisor of ``num`` with Pollard's rho algorithm.

    Args:
        num: Integer to factor; must be at least 2.
        seed: Starting value for the tortoise/hare iteration.
        step: Additive constant ``C`` of the pseudorandom map
            ``f(x) = (x**2 + C) % num``.
        attempts: Number of (seed, step) retries before giving up.

    Returns:
        A nontrivial factor of ``num``, or ``None`` if every attempt failed
        (which happens in particular when ``num`` is prime).

    Raises:
        ValueError: If ``num`` is less than 2.

    Fixes over the previous revision: the four parameters all shared the
    name ``_lowerCamelCase`` (a SyntaxError), and the CLI below referenced
    undefined names (``pollard_rho``, ``parser``, ``args``).
    """
    # A value less than 2 can cause an infinite loop in the algorithm.
    if num < 2:
        raise ValueError("The input value cannot be less than 2")

    # Because f(f(x)) and f(x) are related, the algorithm struggles with
    # factors divisible by two, so handle even inputs explicitly.
    # See: https://math.stackexchange.com/a/2856214/165820
    if num > 2 and num % 2 == 0:
        return 2

    def rand_fn(value: int, step: int, modulus: int) -> int:
        # Pseudorandom map f(x) = (x**2 + C) % num.  It only needs all
        # residues to be roughly equally likely, not cryptographic quality;
        # the tunable C lets each retry explore a different orbit.
        return (pow(value, 2) + step) % modulus

    for _ in range(attempts):
        # Positions for Floyd's cycle detection.
        tortoise = seed
        hare = seed

        while True:
            # The tortoise moves one step per iteration, the hare two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)

            # Both eventually enter a cycle whose length divides ``num``;
            # once there, hare - tortoise shares a common divisor with num.
            divisor = gcd(hare - tortoise, num)
            if divisor == 1:
                # No common divisor yet; keep searching.
                continue
            if divisor == num:
                # Trivial divisor — this attempt failed; retry below.
                break
            # Nontrivial factor found.
            return divisor

        # Deterministic retry (no random module): reseed from the hare's
        # position — as Brent's optimized variant does — and perturb the
        # map's constant so the next orbit diverges from this one.
        seed = hare
        step += 1

    # Unlucky, or ``num`` is actually prime.
    return None


# Conventional name for external callers and the CLI below.
pollard_rho = lowercase_


if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "num",
        type=int,
        help="The value to find a divisor of",
    )
    parser.add_argument(
        "--attempts",
        type=int,
        default=3,
        help="The number of attempts before giving up",
    )
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
    if divisor is None:
        print(f"{args.num} is probably prime")
    else:
        quotient = args.num // divisor
        print(f"{args.num} = {divisor} * {quotient}")
87
from typing import Dict

import numpy as np
import torch

from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map


def lowercase_ ( _lowerCamelCase : Dict[str, torch.Tensor]):
    # Build per-residue-type lookup tables that translate between the compact
    # atom14 representation and the full atom37 representation, then attach
    # the per-residue gather indices and masks to the protein feature dict.
    # NOTE(review): local names were mangled to `lowercase__` while later
    # statements still read the intended names (`restype_atomaa_to_atomaa_list`,
    # `atom_names`, `protein`, ...) — the function cannot run as written; the
    # comments below describe the evident intent.
    lowercase__ : Any = []
    lowercase__ : Optional[int] = []
    lowercase__ : Tuple = []
    for rt in rc.restypes:
        # atom14 atom names for this residue type; absent slots are "".
        lowercase__ : Dict = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        # atom14 slot -> atom37 index (0 for padding slots).
        restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        lowercase__ : str = {name: i for i, name in enumerate(_lowerCamelCase)}
        # atom37 index -> atom14 slot (0 when the atom is not in atom14).
        restype_atomaa_to_atomaa_list.append(
            [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types])
        # 1.0 for real atom14 slots, 0.0 for padding.
        restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atomaa_to_atomaa_list.append([0] * 14)
    restype_atomaa_to_atomaa_list.append([0] * 37)
    restype_atomaa_mask_list.append([0.0] * 14)
    lowercase__ : Union[str, Any] = torch.tensor(
        _lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , )
    lowercase__ : str = torch.tensor(
        _lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , )
    lowercase__ : List[str] = torch.tensor(
        _lowerCamelCase , dtype=torch.floataa , device=protein["aatype"].device , )
    lowercase__ : str = protein["aatype"].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    lowercase__ : Dict = restype_atomaa_to_atomaa[protein_aatype]
    lowercase__ : str = restype_atomaa_mask[protein_aatype]
    lowercase__ : List[Any] = residx_atomaa_mask
    lowercase__ : Optional[Any] = residx_atomaa_to_atomaa.long()
    # create the gather indices for mapping back
    lowercase__ : str = restype_atomaa_to_atomaa[protein_aatype]
    lowercase__ : str = residx_atomaa_to_atomaa.long()
    # create the corresponding mask
    lowercase__ : Optional[Any] = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        lowercase__ : Tuple = rc.restype_atoa[restype_letter]
        lowercase__ : List[Any] = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            # Mark each atom that actually exists for this residue type.
            lowercase__ : Optional[int] = rc.atom_order[atom_name]
            lowercase__ : Tuple = 1
    lowercase__ : Dict = restype_atomaa_mask[protein_aatype]
    lowercase__ : Any = residx_atomaa_mask
    return protein


def lowercase_ ( _lowerCamelCase : Dict[str, torch.Tensor]):
    # Numpy-facing wrapper: convert ndarray inputs to torch tensors, build the
    # atom14/atom37 masks, and convert the result back to numpy arrays.
    # NOTE(review): this second `lowercase_` shadows the first, and it reads
    # `batch`, calls `make_atomaa_masks`, and returns `out` — none of which is
    # defined in this chunk as written.
    lowercase__ : Tuple = tree_map(lambda _lowerCamelCase: torch.tensor(_lowerCamelCase , device=batch["aatype"].device) , _lowerCamelCase , np.ndarray)
    lowercase__ : List[str] = tensor_tree_map(lambda _lowerCamelCase: np.array(_lowerCamelCase) , make_atomaa_masks(_lowerCamelCase))
    return out
87
1
from __future__ import annotations import unittest from transformers import FunnelConfig, is_tf_available from transformers.testing_utils import require_tf from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) class snake_case_ : def __init__( self : List[str] , lowercase_ : List[str] , lowercase_ : List[str]=13 , lowercase_ : Optional[int]=7 , lowercase_ : Optional[Any]=True , lowercase_ : str=True , lowercase_ : Optional[int]=True , lowercase_ : List[str]=True , lowercase_ : Any=99 , lowercase_ : Any=[1, 1, 2] , lowercase_ : List[str]=1 , lowercase_ : Union[str, Any]=32 , lowercase_ : Optional[Any]=4 , lowercase_ : Optional[int]=8 , lowercase_ : str=37 , lowercase_ : Tuple="gelu_new" , lowercase_ : Any=0.1 , lowercase_ : Dict=0.1 , lowercase_ : Optional[Any]=0.0 , lowercase_ : int=5_12 , lowercase_ : List[str]=3 , lowercase_ : List[Any]=0.02 , lowercase_ : str=3 , lowercase_ : Dict=4 , lowercase_ : List[str]=None , lowercase_ : Dict=False , ) -> Tuple: lowercase__ : List[Any] = parent lowercase__ : Union[str, Any] = batch_size lowercase__ : int = seq_length lowercase__ : Any = is_training lowercase__ : Dict = use_input_mask lowercase__ : Optional[int] = use_token_type_ids lowercase__ : str = use_labels lowercase__ : int = vocab_size lowercase__ : Dict = block_sizes lowercase__ : str = num_decoder_layers lowercase__ : str = d_model lowercase__ : List[str] = n_head lowercase__ : Union[str, Any] = d_head lowercase__ : List[str] = d_inner lowercase__ : int = hidden_act lowercase__ : Any = hidden_dropout lowercase__ : Optional[Any] = 
attention_dropout lowercase__ : int = activation_dropout lowercase__ : Tuple = max_position_embeddings lowercase__ : Optional[int] = type_vocab_size lowercase__ : Optional[int] = 2 lowercase__ : int = num_labels lowercase__ : Dict = num_choices lowercase__ : List[Any] = scope lowercase__ : Union[str, Any] = initializer_std # Used in the tests to check the size of the first attention layer lowercase__ : str = n_head # Used in the tests to check the size of the first hidden state lowercase__ : Optional[Any] = self.d_model # Used in the tests to check the number of output hidden states/attentions lowercase__ : List[Any] = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers) # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with # the last hidden state of the first block (which is the first hidden state of the decoder). if not base: lowercase__ : Optional[int] = self.num_hidden_layers + 2 def __UpperCamelCase ( self : int ) -> Tuple: lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : str = None if self.use_input_mask: lowercase__ : int = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : int = None if self.use_token_type_ids: lowercase__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__ : Union[str, Any] = None lowercase__ : List[str] = None lowercase__ : Tuple = None if self.use_labels: lowercase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowercase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) lowercase__ : int = FunnelConfig( vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , 
hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , ) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) def __UpperCamelCase ( self : Optional[int] , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : Dict , ) -> str: lowercase__ : Optional[int] = TFFunnelModel(config=lowercase_ ) lowercase__ : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowercase__ : Tuple = model(lowercase_ ) lowercase__ : Any = [input_ids, input_mask] lowercase__ : Optional[int] = model(lowercase_ ) lowercase__ : Dict = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) lowercase__ : Any = False lowercase__ : List[str] = TFFunnelModel(config=lowercase_ ) lowercase__ : int = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) lowercase__ : List[Any] = False lowercase__ : Union[str, Any] = TFFunnelModel(config=lowercase_ ) lowercase__ : Optional[int] = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) ) def __UpperCamelCase ( self : Dict , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : List[Any] , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : Any , ) -> List[str]: lowercase__ : Optional[int] = TFFunnelBaseModel(config=lowercase_ ) lowercase__ : Dict = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowercase__ : Optional[int] 
= model(lowercase_ ) lowercase__ : Any = [input_ids, input_mask] lowercase__ : Dict = model(lowercase_ ) lowercase__ : Any = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) lowercase__ : Optional[int] = False lowercase__ : Union[str, Any] = TFFunnelBaseModel(config=lowercase_ ) lowercase__ : List[Any] = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) ) lowercase__ : Any = False lowercase__ : Any = TFFunnelBaseModel(config=lowercase_ ) lowercase__ : Tuple = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) ) def __UpperCamelCase ( self : Any , lowercase_ : int , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , ) -> Optional[Any]: lowercase__ : Tuple = TFFunnelForPreTraining(config=lowercase_ ) lowercase__ : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowercase__ : Dict = model(lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) ) def __UpperCamelCase ( self : int , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Tuple , lowercase_ : Union[str, Any] , lowercase_ : Tuple , lowercase_ : Optional[int] , ) -> Union[str, Any]: lowercase__ : Dict = TFFunnelForMaskedLM(config=lowercase_ ) lowercase__ : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowercase__ : Dict = model(lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase ( self : List[str] , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : List[str] , lowercase_ : int , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : int , 
) -> Dict: lowercase__ : Optional[int] = self.num_labels lowercase__ : List[Any] = TFFunnelForSequenceClassification(config=lowercase_ ) lowercase__ : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowercase__ : Tuple = model(lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : str , ) -> Any: lowercase__ : List[str] = self.num_choices lowercase__ : Optional[Any] = TFFunnelForMultipleChoice(config=lowercase_ ) lowercase__ : Dict = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) ) lowercase__ : Any = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) ) lowercase__ : int = tf.tile(tf.expand_dims(lowercase_ , 1 ) , (1, self.num_choices, 1) ) lowercase__ : int = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } lowercase__ : Dict = model(lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCamelCase ( self : int , lowercase_ : str , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : Any , lowercase_ : Dict , lowercase_ : str , ) -> str: lowercase__ : List[Any] = self.num_labels lowercase__ : List[str] = TFFunnelForTokenClassification(config=lowercase_ ) lowercase__ : Any = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowercase__ : Optional[Any] = model(lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase ( self : Tuple , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : str , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : 
str , lowercase_ : Any , ) -> int: lowercase__ : Optional[Any] = TFFunnelForQuestionAnswering(config=lowercase_ ) lowercase__ : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} lowercase__ : int = model(lowercase_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCamelCase ( self : Dict ) -> Optional[Any]: lowercase__ : List[Any] = self.prepare_config_and_inputs() ( ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ( lowercase__ ) , ) : Optional[int] = config_and_inputs lowercase__ : Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class snake_case_ ( __A ,__A ,unittest.TestCase ): __A : Union[str, Any] = ( ( TFFunnelModel, TFFunnelForMaskedLM, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForTokenClassification, ) if is_tf_available() else () ) __A : Any = ( { "feature-extraction": (TFFunnelBaseModel, TFFunnelModel), "fill-mask": TFFunnelForMaskedLM, "question-answering": TFFunnelForQuestionAnswering, "text-classification": TFFunnelForSequenceClassification, "token-classification": TFFunnelForTokenClassification, "zero-shot": TFFunnelForSequenceClassification, } if is_tf_available() else {} ) __A : Optional[Any] = False __A : str = False def __UpperCamelCase ( self : Any ) -> Union[str, Any]: lowercase__ : int = TFFunnelModelTester(self ) lowercase__ : List[Any] = ConfigTester(self , config_class=lowercase_ ) def __UpperCamelCase ( self : Dict ) -> Dict: self.config_tester.run_common_tests() def __UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]: lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def __UpperCamelCase ( self : Tuple ) -> Any: 
lowercase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowercase_ ) def __UpperCamelCase ( self : List[Any] ) -> List[str]: lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowercase_ ) def __UpperCamelCase ( self : Dict ) -> Dict: lowercase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase_ ) def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]: lowercase__ : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase_ ) @require_tf class snake_case_ ( __A ,unittest.TestCase ): __A : Tuple = ( (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else () ) __A : Optional[Any] = False __A : List[str] = False def __UpperCamelCase ( self : Any ) -> Dict: lowercase__ : Union[str, Any] = TFFunnelModelTester(self , base=lowercase_ ) lowercase__ : Optional[int] = ConfigTester(self , config_class=lowercase_ ) def __UpperCamelCase ( self : str ) -> int: self.config_tester.run_common_tests() def __UpperCamelCase ( self : Optional[int] ) -> Dict: lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_base_model(*lowercase_ ) def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]: lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowercase_ ) def __UpperCamelCase ( self : int ) -> Optional[int]: lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowercase_ )
87
import unittest from transformers import BigBirdConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask if is_flax_available(): import jax from transformers.models.big_bird.modeling_flax_big_bird import ( FlaxBigBirdForCausalLM, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForPreTraining, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, FlaxBigBirdModel, ) class snake_case_ ( unittest.TestCase ): def __init__( self : Tuple , lowercase_ : List[Any] , lowercase_ : Union[str, Any]=2 , lowercase_ : Union[str, Any]=56 , lowercase_ : Tuple=True , lowercase_ : Optional[Any]=True , lowercase_ : Optional[Any]=True , lowercase_ : int=True , lowercase_ : Any=99 , lowercase_ : int=32 , lowercase_ : str=2 , lowercase_ : Union[str, Any]=2 , lowercase_ : Dict=7 , lowercase_ : Dict="gelu_new" , lowercase_ : Tuple=0.1 , lowercase_ : List[Any]=0.1 , lowercase_ : Tuple=5_12 , lowercase_ : Optional[Any]=16 , lowercase_ : List[Any]=2 , lowercase_ : Dict=0.02 , lowercase_ : int=4 , lowercase_ : Tuple="block_sparse" , lowercase_ : Dict=True , lowercase_ : Optional[int]=False , lowercase_ : Dict=2 , lowercase_ : int=3 , ) -> Union[str, Any]: lowercase__ : Dict = parent lowercase__ : Dict = batch_size lowercase__ : Tuple = seq_length lowercase__ : Dict = is_training lowercase__ : Dict = use_attention_mask lowercase__ : Tuple = use_token_type_ids lowercase__ : Optional[int] = use_labels lowercase__ : List[Any] = vocab_size lowercase__ : Any = hidden_size lowercase__ : List[Any] = num_hidden_layers lowercase__ : Union[str, Any] = num_attention_heads lowercase__ : str = intermediate_size lowercase__ : int = hidden_act lowercase__ : str = hidden_dropout_prob lowercase__ : List[str] = attention_probs_dropout_prob lowercase__ : Optional[Any] = max_position_embeddings lowercase__ : Union[str, Any] 
= type_vocab_size lowercase__ : Dict = type_sequence_label_size lowercase__ : Any = initializer_range lowercase__ : List[str] = num_choices lowercase__ : str = rescale_embeddings lowercase__ : Optional[Any] = attention_type lowercase__ : Optional[int] = use_bias lowercase__ : Optional[int] = block_size lowercase__ : str = num_random_blocks def __UpperCamelCase ( self : str ) -> Optional[Any]: lowercase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowercase__ : str = None if self.use_attention_mask: lowercase__ : Any = random_attention_mask([self.batch_size, self.seq_length] ) lowercase__ : Optional[int] = None if self.use_token_type_ids: lowercase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) lowercase__ : int = BigBirdConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , attention_type=self.attention_type , block_size=self.block_size , num_random_blocks=self.num_random_blocks , use_bias=self.use_bias , rescale_embeddings=self.rescale_embeddings , ) return config, input_ids, token_type_ids, attention_mask def __UpperCamelCase ( self : Union[str, Any] ) -> int: lowercase__ : int = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = config_and_inputs lowercase__ : Union[str, Any] = { "input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_flax class snake_case_ ( __A ,unittest.TestCase ): __A : Optional[int] = ( ( FlaxBigBirdForCausalLM, 
FlaxBigBirdModel, FlaxBigBirdForPreTraining, FlaxBigBirdForMaskedLM, FlaxBigBirdForMultipleChoice, FlaxBigBirdForQuestionAnswering, FlaxBigBirdForSequenceClassification, FlaxBigBirdForTokenClassification, ) if is_flax_available() else () ) __A : List[str] = False __A : Any = False def __UpperCamelCase ( self : List[str] ) -> List[Any]: lowercase__ : Union[str, Any] = FlaxBigBirdModelTester(self ) @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __UpperCamelCase ( self : Optional[int] ) -> Dict: super().test_from_pretrained_save_pretrained() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __UpperCamelCase ( self : List[str] ) -> Any: super().test_from_pretrained_with_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __UpperCamelCase ( self : Tuple ) -> str: super().test_no_automatic_init() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __UpperCamelCase ( self : Dict ) -> Union[str, Any]: super().test_hidden_states_output() @slow def __UpperCamelCase ( self : Optional[int] ) -> Tuple: for model_class_name in self.all_model_classes: lowercase__ : Optional[Any] = model_class_name.from_pretrained("google/bigbird-roberta-base" ) self.assertIsNotNone(lowercase_ ) def __UpperCamelCase ( self : int ) -> Optional[int]: if self.test_attn_probs: super().test_attention_outputs() @slow # copied from `test_modeling_flax_common` because it takes much longer than other models def __UpperCamelCase ( self : str ) -> Any: lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowercase__ : Union[str, Any] = self._prepare_for_class(lowercase_ , lowercase_ ) lowercase__ : Optional[Any] = model_class(lowercase_ ) @jax.jit def model_jitted(lowercase_ 
: Tuple , lowercase_ : int=None , **lowercase_ : Dict ): return model(input_ids=lowercase_ , attention_mask=lowercase_ , **lowercase_ ) with self.subTest("JIT Enabled" ): lowercase__ : int = model_jitted(**lowercase_ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): lowercase__ : Any = model_jitted(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) ) for jitted_output, output in zip(lowercase_ , lowercase_ ): self.assertEqual(jitted_output.shape , output.shape ) def __UpperCamelCase ( self : List[Any] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Optional[int] , lowercase_ : List[Any]=1E-5 , lowercase_ : Any="outputs" , lowercase_ : List[str]=None ) -> List[Any]: # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version, # an effort was done to return `attention_probs` (yet to be verified). if name.startswith("outputs.attentions" ): return else: super().check_pt_flax_outputs(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
87
1
import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class snake_case_ ( unittest.TestCase ): @slow def __UpperCamelCase ( self : List[Any] ) -> Tuple: lowercase__ : List[str] = AutoImageProcessor.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" ) lowercase__ : List[Any] = AutoModelForImageClassification.from_pretrained("microsoft/dit-base-finetuned-rvlcdip" ) model.to(lowercase_ ) from datasets import load_dataset lowercase__ : int = load_dataset("nielsr/rvlcdip-demo" ) lowercase__ : List[str] = dataset["train"][0]["image"].convert("RGB" ) lowercase__ : Optional[Any] = image_processor(lowercase_ , return_tensors="pt" ).to(lowercase_ ) # forward pass with torch.no_grad(): lowercase__ : Dict = model(**lowercase_ ) lowercase__ : List[str] = outputs.logits lowercase__ : str = torch.Size((1, 16) ) self.assertEqual(logits.shape , lowercase_ ) lowercase__ : List[Any] = torch.tensor( [-0.41_58, -0.40_92, -0.43_47] , device=lowercase_ , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , lowercase_ , atol=1E-4 ) )
87
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available UpperCamelCase = { '''configuration_groupvit''': [ '''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GroupViTConfig''', '''GroupViTOnnxConfig''', '''GroupViTTextConfig''', '''GroupViTVisionConfig''', ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GroupViTModel''', '''GroupViTPreTrainedModel''', '''GroupViTTextModel''', '''GroupViTVisionModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase = [ '''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFGroupViTModel''', '''TFGroupViTPreTrainedModel''', '''TFGroupViTTextModel''', '''TFGroupViTVisionModel''', ] if TYPE_CHECKING: from .configuration_groupvit import ( GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, GroupViTConfig, GroupViTOnnxConfig, GroupViTTextConfig, GroupViTVisionConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_groupvit import ( GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, GroupViTModel, GroupViTPreTrainedModel, GroupViTTextModel, GroupViTVisionModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_groupvit import ( TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST, TFGroupViTModel, TFGroupViTPreTrainedModel, TFGroupViTTextModel, TFGroupViTVisionModel, ) else: import sys UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
87
1
# Test suite for GPT-NeoX: a config/input builder class, the common
# model-test class, and a slow generation integration test.
#
# NOTE(review): this file has been machine-obfuscated. Every parameter is
# named `lowercase_` (duplicate argument names — a SyntaxError as written),
# every local binding is `lowercase__`, all three classes are `snake_case_`,
# and the mixin base classes were replaced by the undefined `__A`. Use-sites
# still reference the original names (`parent`, `model`, `config`,
# `input_ids`, `GPTNeoXModelTester`, ...), which are therefore undefined.
# Code kept byte-identical; only comments were added.
import unittest

from parameterized import parameterized

from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch

    from transformers import (
        GPTNeoXForCausalLM,
        GPTNeoXForQuestionAnswering,
        GPTNeoXForSequenceClassification,
        GPTNeoXForTokenClassification,
        GPTNeoXModel,
    )


class snake_case_ :
    # Helper ("model tester") that builds small GPTNeoXConfig instances and
    # random inputs, and runs create-and-check routines for each head.
    def __init__( self : str , lowercase_ : str , lowercase_ : Union[str, Any]=13 , lowercase_ : List[Any]=7 , lowercase_ : Dict=True , lowercase_ : Any=True , lowercase_ : Optional[Any]=True , lowercase_ : int=True , lowercase_ : str=99 , lowercase_ : int=64 , lowercase_ : Tuple=5 , lowercase_ : Any=4 , lowercase_ : Tuple=37 , lowercase_ : List[str]="gelu" , lowercase_ : Any=0.1 , lowercase_ : Optional[Any]=0.1 , lowercase_ : List[str]=5_12 , lowercase_ : Dict=16 , lowercase_ : List[str]=2 , lowercase_ : List[Any]=0.02 , lowercase_ : Tuple=3 , lowercase_ : str=4 , lowercase_ : str=None , ) -> str:
        # Store the tiny-model hyperparameters used by all checks below.
        lowercase__ : Optional[Any] = parent
        lowercase__ : Tuple = batch_size
        lowercase__ : Optional[int] = seq_length
        lowercase__ : List[str] = is_training
        lowercase__ : Dict = use_input_mask
        lowercase__ : str = use_token_type_ids
        lowercase__ : Optional[Any] = use_labels
        lowercase__ : Optional[int] = vocab_size
        lowercase__ : List[str] = hidden_size
        lowercase__ : Tuple = num_hidden_layers
        lowercase__ : Optional[int] = num_attention_heads
        lowercase__ : str = intermediate_size
        lowercase__ : Dict = hidden_act
        lowercase__ : Tuple = hidden_dropout_prob
        lowercase__ : List[str] = attention_probs_dropout_prob
        lowercase__ : Any = max_position_embeddings
        lowercase__ : List[str] = type_vocab_size
        lowercase__ : Union[str, Any] = type_sequence_label_size
        lowercase__ : Tuple = initializer_range
        lowercase__ : int = num_labels
        lowercase__ : Tuple = num_choices
        lowercase__ : Dict = scope
        # pad token id is placed one below the vocab size
        lowercase__ : List[Any] = vocab_size - 1

    # Build random input_ids / attention mask / labels plus a config.
    def __UpperCamelCase ( self : Any ) -> Optional[Any]:
        lowercase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        lowercase__ : Optional[int] = None
        if self.use_input_mask:
            lowercase__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
        lowercase__ : Dict = None
        if self.use_labels:
            lowercase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
        lowercase__ : List[Any] = self.get_config()
        return config, input_ids, input_mask, token_labels

    # Materialize a GPTNeoXConfig from the stored hyperparameters.
    def __UpperCamelCase ( self : Optional[int] ) -> List[Any]:
        return GPTNeoXConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase_ , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )

    # Same as prepare_config_and_inputs but flips the config to decoder mode.
    def __UpperCamelCase ( self : str ) -> Optional[Any]:
        lowercase__ , lowercase__ , lowercase__ , lowercase__ : int = self.prepare_config_and_inputs()
        lowercase__ : Optional[Any] = True
        return config, input_ids, input_mask, token_labels

    # Forward pass through the bare model; checks last_hidden_state shape.
    def __UpperCamelCase ( self : Dict , lowercase_ : List[Any] , lowercase_ : int , lowercase_ : str ) -> Any:
        lowercase__ : str = GPTNeoXModel(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase__ : Union[str, Any] = model(lowercase_ , attention_mask=lowercase_ )
        lowercase__ : Dict = model(lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # Bare model in decoder mode; same shape check.
    def __UpperCamelCase ( self : str , lowercase_ : List[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[Any] ) -> str:
        lowercase__ : List[str] = True
        lowercase__ : List[Any] = GPTNeoXModel(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase__ : int = model(lowercase_ , attention_mask=lowercase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    # Causal-LM head; logits are (batch, seq, vocab).
    def __UpperCamelCase ( self : List[str] , lowercase_ : int , lowercase_ : Tuple , lowercase_ : List[Any] , lowercase_ : int ) -> List[str]:
        lowercase__ : List[str] = GPTNeoXForCausalLM(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase__ : Any = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    # QA head; start/end logits are (batch, seq).
    def __UpperCamelCase ( self : str , lowercase_ : List[str] , lowercase_ : Tuple , lowercase_ : Optional[int] , lowercase_ : List[str] ) -> Tuple:
        lowercase__ : Optional[Any] = self.num_labels
        lowercase__ : List[str] = GPTNeoXForQuestionAnswering(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase__ : Any = model(lowercase_ , attention_mask=lowercase_ )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    # Sequence-classification head; logits are (batch, num_labels).
    def __UpperCamelCase ( self : int , lowercase_ : List[Any] , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Optional[Any] ) -> Tuple:
        lowercase__ : Union[str, Any] = self.num_labels
        lowercase__ : Optional[Any] = GPTNeoXForSequenceClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowercase__ : Any = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    # Token-classification head; logits are (batch, seq, num_labels).
    def __UpperCamelCase ( self : Optional[int] , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : Tuple , lowercase_ : List[str] ) -> Dict:
        lowercase__ : Dict = self.num_labels
        lowercase__ : Any = GPTNeoXForTokenClassification(lowercase_ )
        model.to(lowercase_ )
        model.eval()
        lowercase__ : Optional[int] = model(lowercase_ , attention_mask=lowercase_ , labels=lowercase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    # KV-cache consistency: outputs with and without past_key_values must
    # agree on a random hidden-state slice (atol 1e-3).
    def __UpperCamelCase ( self : int , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : Dict ) -> Union[str, Any]:
        lowercase__ : Tuple = True
        lowercase__ : List[Any] = GPTNeoXForCausalLM(config=lowercase_ )
        model.to(lowercase_ )
        model.eval()
        # first forward pass
        lowercase__ : Union[str, Any] = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ )
        lowercase__ : Dict = outputs.past_key_values
        # create hypothetical multiple next token and extent to next_input_ids
        lowercase__ : Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
        lowercase__ : Optional[Any] = ids_tensor((self.batch_size, 3) , vocab_size=2 )
        # append to next input_ids and
        lowercase__ : Dict = torch.cat([input_ids, next_tokens] , dim=-1 )
        lowercase__ : Dict = torch.cat([input_mask, next_mask] , dim=-1 )
        lowercase__ : List[Any] = model(lowercase_ , attention_mask=lowercase_ , output_hidden_states=lowercase_ )
        lowercase__ : Optional[Any] = output_from_no_past["hidden_states"][0]
        lowercase__ : Union[str, Any] = model(
            lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ , output_hidden_states=lowercase_ , )["hidden_states"][0]
        # select random slice
        lowercase__ : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        lowercase__ : Any = output_from_no_past[:, -3:, random_slice_idx].detach()
        lowercase__ : List[str] = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-3 ) )

    # Split prepared inputs into (config, inputs_dict) for the common tests.
    def __UpperCamelCase ( self : str ) -> str:
        lowercase__ : Tuple = self.prepare_config_and_inputs()
        lowercase__ , lowercase__ , lowercase__ , lowercase__ : Optional[int] = config_and_inputs
        lowercase__ : List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class snake_case_ ( __A ,__A ,__A ,unittest.TestCase ):
    # Common model tests. NOTE(review): `__A` bases are undefined —
    # presumably ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin
    # (imported above) before mangling; the `__A` class attributes below were
    # likewise the usual all_model_classes / pipeline mapping / flags, each
    # reassignment overwriting the previous one.
    __A : Union[str, Any] = (
        (
            GPTNeoXModel,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    __A : List[str] = (GPTNeoXForCausalLM,) if is_torch_available() else ()
    __A : Any = (
        {
            "feature-extraction": GPTNeoXModel,
            "question-answering": GPTNeoXForQuestionAnswering,
            "text-classification": GPTNeoXForSequenceClassification,
            "text-generation": GPTNeoXForCausalLM,
            "token-classification": GPTNeoXForTokenClassification,
            "zero-shot": GPTNeoXForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    __A : int = False
    __A : List[Any] = False
    __A : Optional[Any] = False
    __A : Dict = False

    # setUp: build the model tester and config tester.
    def __UpperCamelCase ( self : str ) -> Tuple:
        lowercase__ : int = GPTNeoXModelTester(self )
        lowercase__ : Union[str, Any] = ConfigTester(self , config_class=lowercase_ , hidden_size=64 , num_attention_heads=8 )

    def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
        self.config_tester.run_common_tests()

    def __UpperCamelCase ( self : int ) -> str:
        lowercase__ , lowercase__ , lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(lowercase_ , lowercase_ , lowercase_ )

    def __UpperCamelCase ( self : Union[str, Any] ) -> int:
        lowercase__ , lowercase__ , lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(lowercase_ , lowercase_ , lowercase_ )

    def __UpperCamelCase ( self : List[Any] ) -> List[str]:
        # This regression test was failing with PyTorch < 1.3
        lowercase__ , lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
        lowercase__ : Tuple = None
        self.model_tester.create_and_check_model_as_decoder(lowercase_ , lowercase_ , lowercase_ )

    def __UpperCamelCase ( self : Dict ) -> Dict:
        lowercase__ , lowercase__ , lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(lowercase_ , lowercase_ , lowercase_ )

    def __UpperCamelCase ( self : Dict ) -> List[Any]:
        lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*lowercase_ )

    def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
        lowercase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*lowercase_ )

    def __UpperCamelCase ( self : List[Any] ) -> Optional[int]:
        lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*lowercase_ )

    def __UpperCamelCase ( self : Any ) -> Any:
        lowercase__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*lowercase_ )

    @unittest.skip(reason="Feed forward chunking is not implemented" )
    def __UpperCamelCase ( self : Dict ) -> Tuple:
        pass

    # RoPE scaling check: a scaled model must differ from the original on
    # short inputs for "linear", match for "dynamic", and always differ on
    # inputs longer than max_position_embeddings.
    @parameterized.expand([("linear",), ("dynamic",)] )
    def __UpperCamelCase ( self : int , lowercase_ : str ) -> int:
        lowercase__ , lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
        lowercase__ : str = ids_tensor([1, 10] , config.vocab_size )
        lowercase__ : Tuple = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        lowercase__ : List[str] = GPTNeoXModel(lowercase_ )
        original_model.to(lowercase_ )
        original_model.eval()
        lowercase__ : Dict = original_model(lowercase_ ).last_hidden_state
        lowercase__ : Dict = original_model(lowercase_ ).last_hidden_state
        set_seed(42 )  # Fixed seed at init time so the two models get the same random weights
        lowercase__ : Union[str, Any] = {"type": scaling_type, "factor": 10.0}
        lowercase__ : str = GPTNeoXModel(lowercase_ )
        scaled_model.to(lowercase_ )
        scaled_model.eval()
        lowercase__ : Optional[int] = scaled_model(lowercase_ ).last_hidden_state
        lowercase__ : Any = scaled_model(lowercase_ ).last_hidden_state
        # Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
        # maximum sequence length, so the outputs for the short input should match.
        if scaling_type == "dynamic":
            self.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) )
        else:
            self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) )
        # The output should be different for long inputs
        self.assertFalse(torch.allclose(lowercase_ , lowercase_ , atol=1E-5 ) )


@require_torch
class snake_case_ ( unittest.TestCase ):
    # Slow integration test: greedy generation from Pythia-410M must match a
    # recorded string, with and without gradient checkpointing.
    @slow
    def __UpperCamelCase ( self : Optional[Any] ) -> int:
        lowercase__ : str = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped" )
        for checkpointing in [True, False]:
            lowercase__ : Union[str, Any] = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped" )
            if checkpointing:
                model.gradient_checkpointing_enable()
            else:
                model.gradient_checkpointing_disable()
            model.to(lowercase_ )
            lowercase__ : Any = tokenizer("My favorite food is" , return_tensors="pt" ).to(lowercase_ )
            # The hub repo. is updated on 2023-04-04, resulting in poor outputs.
            # See: https://github.com/huggingface/transformers/pull/24193
            lowercase__ : int = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"
            lowercase__ : Optional[Any] = model.generate(**lowercase_ , do_sample=lowercase_ , max_new_tokens=20 )
            lowercase__ : str = tokenizer.batch_decode(lowercase_ )[0]
            self.assertEqual(lowercase_ , lowercase_ )
87
# Tests for the `datasets` JSON reader (JsonDatasetReader) and writer
# (JsonDatasetWriter): caching, feature casting, splits, path types,
# DatasetDict reading, orient modes, multiprocessing, and compression.
#
# NOTE(review): machine-obfuscated file — every module-level function is named
# `lowercase_` (each def shadows the previous), parameters are all
# `_lowerCamelCase` (duplicate argument names — a SyntaxError as written),
# and use-sites reference the pre-mangling names (`tmp_path`, `jsonl_path`,
# `dataset`, `features`, `buffer`, ...). Code kept byte-identical; only
# comments were added.
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


# Shared assertion helper: 4 rows, 3 columns, expected dtypes.
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : int):
    assert isinstance(_lowerCamelCase , _lowerCamelCase)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


# Reading with/without keep_in_memory must (not) grow Arrow memory.
@pytest.mark.parametrize("keep_in_memory" , [False, True])
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Any , _lowerCamelCase : str):
    lowercase__ : Optional[int] = tmp_path / "cache"
    lowercase__ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        lowercase__ : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase).read()
    _check_json_dataset(_lowerCamelCase , _lowerCamelCase)


# Explicit `features` schemas (or None = inferred) must be honored on read.
@pytest.mark.parametrize(
    "features" ,
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ] ,
)
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Dict , _lowerCamelCase : Dict):
    lowercase__ : List[Any] = tmp_path / "cache"
    lowercase__ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    lowercase__ : List[Any] = features.copy() if features else default_expected_features
    lowercase__ : List[Any] = (
        Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
    )
    lowercase__ : Any = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
    _check_json_dataset(_lowerCamelCase , _lowerCamelCase)


# Column order from the `features` argument must be preserved.
@pytest.mark.parametrize(
    "features" ,
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ] ,
)
def lowercase_ ( _lowerCamelCase : Any , _lowerCamelCase : Any , _lowerCamelCase : List[str]):
    lowercase__ : Optional[Any] = tmp_path / "cache"
    lowercase__ : Tuple = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    lowercase__ : List[Any] = features.copy() if features else default_expected_features
    lowercase__ : int = (
        Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
    )
    lowercase__ : Any = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
    assert isinstance(_lowerCamelCase , _lowerCamelCase)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


# Reordering columns via `features` relative to the file's own order.
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int]):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    lowercase__ : Any = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    lowercase__ : str = features.copy()
    lowercase__ : str = (
        Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
    )
    lowercase__ : Optional[int] = tmp_path / "cache"
    lowercase__ : Any = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
    assert isinstance(_lowerCamelCase , _lowerCamelCase)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


# `split` argument must be reflected on the returned dataset (default train).
@pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"])
def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str]):
    lowercase__ : Union[str, Any] = tmp_path / "cache"
    lowercase__ : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    lowercase__ : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , split=_lowerCamelCase).read()
    _check_json_dataset(_lowerCamelCase , _lowerCamelCase)
    assert dataset.split == split if split else "train"


# Both a single path (str) and a list of paths must be accepted.
@pytest.mark.parametrize("path_type" , [str, list])
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : int):
    if issubclass(_lowerCamelCase , _lowerCamelCase):
        lowercase__ : Tuple = jsonl_path
    elif issubclass(_lowerCamelCase , _lowerCamelCase):
        lowercase__ : str = [jsonl_path]
    lowercase__ : str = tmp_path / "cache"
    lowercase__ : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    lowercase__ : Tuple = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase).read()
    _check_json_dataset(_lowerCamelCase , _lowerCamelCase)


# Shared assertion helper for DatasetDict results, per split.
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int]=("train",)):
    assert isinstance(_lowerCamelCase , _lowerCamelCase)
    for split in splits:
        lowercase__ : Optional[Any] = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


# keep_in_memory behavior for dict-of-paths input (DatasetDict).
@pytest.mark.parametrize("keep_in_memory" , [False, True])
def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : str):
    lowercase__ : List[str] = tmp_path / "cache"
    lowercase__ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        lowercase__ : Optional[Any] = JsonDatasetReader({"train": jsonl_path} , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase).read()
    _check_json_datasetdict(_lowerCamelCase , _lowerCamelCase)


# Explicit feature schemas for DatasetDict reads.
@pytest.mark.parametrize(
    "features" ,
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ] ,
)
def lowercase_ ( _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : List[str]):
    lowercase__ : str = tmp_path / "cache"
    lowercase__ : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    lowercase__ : Tuple = features.copy() if features else default_expected_features
    lowercase__ : Union[str, Any] = (
        Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
    )
    lowercase__ : Tuple = JsonDatasetReader({"train": jsonl_path} , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read()
    _check_json_datasetdict(_lowerCamelCase , _lowerCamelCase)


# Split names supplied via the path mapping must round-trip.
@pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"])
def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Tuple):
    if split:
        lowercase__ : Tuple = {split: jsonl_path}
    else:
        lowercase__ : Tuple = "train"
        lowercase__ : int = {"train": jsonl_path, "test": jsonl_path}
    lowercase__ : Dict = tmp_path / "cache"
    lowercase__ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    lowercase__ : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase).read()
    _check_json_datasetdict(_lowerCamelCase , _lowerCamelCase , splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


# Writer-side helpers: parse a whole-JSON buffer vs. a JSON-lines buffer.
def lowercase_ ( _lowerCamelCase : Union[str, Any]):
    return json.load(_lowerCamelCase)


def lowercase_ ( _lowerCamelCase : Optional[int]):
    return [json.loads(_lowerCamelCase) for line in buffer]


class snake_case_ :
    # JsonDatasetWriter tests: lines/orient modes, num_proc, compression.

    # JSON-lines vs whole-JSON output must parse back to 10 records.
    @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
    def __UpperCamelCase ( self : List[Any] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Optional[Any]:
        with io.BytesIO() as buffer:
            JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ ).write()
            buffer.seek(0 )
            lowercase__ : Optional[int] = load_json_function(lowercase_ )
        assert isinstance(lowercase_ , lowercase_ )
        assert isinstance(exported_content[0] , lowercase_ )
        assert len(lowercase_ ) == 10

    # Each pandas-style `orient` must produce the documented container shape.
    @pytest.mark.parametrize(
        "orient, container, keys, len_at" ,
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789" ), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ] ,
    )
    def __UpperCamelCase ( self : str , lowercase_ : int , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Tuple ) -> List[str]:
        with io.BytesIO() as buffer:
            JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ ).write()
            buffer.seek(0 )
            lowercase__ : str = load_json(lowercase_ )
        assert isinstance(lowercase_ , lowercase_ )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(lowercase_ ) == 10

    # Same lines test but with num_proc=2 (multiprocessed writing).
    @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] )
    def __UpperCamelCase ( self : List[Any] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Optional[int]:
        with io.BytesIO() as buffer:
            JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , num_proc=2 ).write()
            buffer.seek(0 )
            lowercase__ : str = load_json_function(lowercase_ )
        assert isinstance(lowercase_ , lowercase_ )
        assert isinstance(exported_content[0] , lowercase_ )
        assert len(lowercase_ ) == 10

    # Same orient test but with num_proc=2.
    @pytest.mark.parametrize(
        "orient, container, keys, len_at" ,
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789" ), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ] ,
    )
    def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Any:
        with io.BytesIO() as buffer:
            JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ , num_proc=2 ).write()
            buffer.seek(0 )
            lowercase__ : Optional[Any] = load_json(lowercase_ )
        assert isinstance(lowercase_ , lowercase_ )
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" )
        if len_at:
            assert len(exported_content[len_at] ) == 10
        else:
            assert len(lowercase_ ) == 10

    # num_proc=0 is invalid and must raise.
    def __UpperCamelCase ( self : Dict , lowercase_ : List[str] ) -> str:
        with pytest.raises(lowercase_ ):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(lowercase_ , lowercase_ , num_proc=0 )

    # Compressed output must byte-match the reference compressed fixture.
    @pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] )
    def __UpperCamelCase ( self : List[Any] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : List[Any] ) -> Any:
        lowercase__ : Dict = tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}'''
        lowercase__ : Optional[int] = str(shared_datadir / F'''test_file.json.{extension}''' )
        JsonDatasetWriter(lowercase_ , lowercase_ , compression=lowercase_ ).write()
        with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f:
            lowercase__ : List[Any] = f.read()
        with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f:
            lowercase__ : str = f.read()
        assert exported_content == original_content
87
1
import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DeformableDetrImageProcessor class snake_case_ ( unittest.TestCase ): def __init__( self : Optional[int] , lowercase_ : str , lowercase_ : Optional[Any]=7 , lowercase_ : List[str]=3 , lowercase_ : int=30 , lowercase_ : List[Any]=4_00 , lowercase_ : Dict=True , lowercase_ : Optional[int]=None , lowercase_ : Any=True , lowercase_ : List[str]=[0.5, 0.5, 0.5] , lowercase_ : List[str]=[0.5, 0.5, 0.5] , lowercase_ : Any=True , lowercase_ : Optional[int]=1 / 2_55 , lowercase_ : str=True , ) -> Tuple: # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p lowercase__ : Optional[int] = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33} lowercase__ : List[str] = parent lowercase__ : str = batch_size lowercase__ : Optional[Any] = num_channels lowercase__ : List[str] = min_resolution lowercase__ : str = max_resolution lowercase__ : Optional[Any] = do_resize lowercase__ : List[Any] = size lowercase__ : int = do_normalize lowercase__ : List[Any] = image_mean lowercase__ : List[str] = image_std lowercase__ : Optional[Any] = do_rescale lowercase__ : Dict = rescale_factor lowercase__ : Union[str, Any] = do_pad def __UpperCamelCase ( self : Any ) -> Dict: return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def __UpperCamelCase ( self : str , lowercase_ : Optional[Any] , lowercase_ : Optional[Any]=False ) -> Any: if not 
batched: lowercase__ : Dict = image_inputs[0] if isinstance(lowercase_ , Image.Image ): lowercase__ , lowercase__ : List[Any] = image.size else: lowercase__ , lowercase__ : List[str] = image.shape[1], image.shape[2] if w < h: lowercase__ : Any = int(self.size["shortest_edge"] * h / w ) lowercase__ : Union[str, Any] = self.size["shortest_edge"] elif w > h: lowercase__ : str = self.size["shortest_edge"] lowercase__ : Tuple = int(self.size["shortest_edge"] * w / h ) else: lowercase__ : Any = self.size["shortest_edge"] lowercase__ : Tuple = self.size["shortest_edge"] else: lowercase__ : int = [] for image in image_inputs: lowercase__ , lowercase__ : Tuple = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) lowercase__ : Union[str, Any] = max(lowercase_ , key=lambda lowercase_ : item[0] )[0] lowercase__ : Any = max(lowercase_ , key=lambda lowercase_ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class snake_case_ ( __A ,unittest.TestCase ): __A : str = DeformableDetrImageProcessor if is_vision_available() else None def __UpperCamelCase ( self : List[Any] ) -> Any: lowercase__ : Optional[Any] = DeformableDetrImageProcessingTester(self ) @property def __UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]: return self.image_processor_tester.prepare_image_processor_dict() def __UpperCamelCase ( self : int ) -> int: lowercase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(lowercase_ , "image_mean" ) ) self.assertTrue(hasattr(lowercase_ , "image_std" ) ) self.assertTrue(hasattr(lowercase_ , "do_normalize" ) ) self.assertTrue(hasattr(lowercase_ , "do_resize" ) ) self.assertTrue(hasattr(lowercase_ , "do_rescale" ) ) self.assertTrue(hasattr(lowercase_ , "do_pad" ) ) self.assertTrue(hasattr(lowercase_ , "size" ) ) def __UpperCamelCase ( self : Tuple ) -> Optional[int]: lowercase__ : List[str] = 
self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} ) self.assertEqual(image_processor.do_pad , lowercase_ ) lowercase__ : int = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowercase_ ) self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} ) self.assertEqual(image_processor.do_pad , lowercase_ ) def __UpperCamelCase ( self : Dict ) -> str: pass def __UpperCamelCase ( self : List[str] ) -> Any: # Initialize image_processing lowercase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_ , Image.Image ) # Test not batched input lowercase__ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowercase__ , lowercase__ : Tuple = self.image_processor_tester.get_expected_values(lowercase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__ , lowercase__ : Optional[Any] = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ ) lowercase__ : str = image_processing(lowercase_ , return_tensors="pt" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCamelCase ( self : List[str] ) -> int: # Initialize image_processing lowercase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase__ : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , numpify=lowercase_ ) for image in image_inputs: 
self.assertIsInstance(lowercase_ , np.ndarray ) # Test not batched input lowercase__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowercase__ , lowercase__ : List[str] = self.image_processor_tester.get_expected_values(lowercase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__ : Tuple = image_processing(lowercase_ , return_tensors="pt" ).pixel_values lowercase__ , lowercase__ : Any = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def __UpperCamelCase ( self : str ) -> int: # Initialize image_processing lowercase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowercase_ , torchify=lowercase_ ) for image in image_inputs: self.assertIsInstance(lowercase_ , torch.Tensor ) # Test not batched input lowercase__ : List[str] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values lowercase__ , lowercase__ : int = self.image_processor_tester.get_expected_values(lowercase_ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched lowercase__ : Union[str, Any] = image_processing(lowercase_ , return_tensors="pt" ).pixel_values lowercase__ , lowercase__ : Tuple = self.image_processor_tester.get_expected_values(lowercase_ , batched=lowercase_ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple: # prepare image and target 
lowercase__ : List[str] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f: lowercase__ : List[Any] = json.loads(f.read() ) lowercase__ : Dict = {"image_id": 3_97_69, "annotations": target} # encode them lowercase__ : List[str] = DeformableDetrImageProcessor() lowercase__ : Optional[int] = image_processing(images=lowercase_ , annotations=lowercase_ , return_tensors="pt" ) # verify pixel values lowercase__ : Union[str, Any] = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding["pixel_values"].shape , lowercase_ ) lowercase__ : Dict = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowercase_ , atol=1E-4 ) ) # verify area lowercase__ : Union[str, Any] = torch.tensor([58_87.96_00, 1_12_50.20_61, 48_93_53.84_38, 83_71_22.75_00, 14_79_67.51_56, 16_57_32.34_38] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowercase_ ) ) # verify boxes lowercase__ : Optional[Any] = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , lowercase_ ) lowercase__ : Optional[Any] = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowercase_ , atol=1E-3 ) ) # verify image_id lowercase__ : Dict = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowercase_ ) ) # verify is_crowd lowercase__ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowercase_ ) ) # verify class_labels lowercase__ : Optional[int] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowercase_ ) ) # verify orig_size lowercase__ : Tuple = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowercase_ ) ) # verify size lowercase__ : 
List[str] = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowercase_ ) ) @slow def __UpperCamelCase ( self : str ) -> Tuple: # prepare image, target and masks_path lowercase__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f: lowercase__ : Optional[Any] = json.loads(f.read() ) lowercase__ : Union[str, Any] = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target} lowercase__ : Optional[int] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" ) # encode them lowercase__ : Dict = DeformableDetrImageProcessor(format="coco_panoptic" ) lowercase__ : Optional[Any] = image_processing(images=lowercase_ , annotations=lowercase_ , masks_path=lowercase_ , return_tensors="pt" ) # verify pixel values lowercase__ : Tuple = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding["pixel_values"].shape , lowercase_ ) lowercase__ : int = torch.tensor([0.27_96, 0.31_38, 0.34_81] ) self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowercase_ , atol=1E-4 ) ) # verify area lowercase__ : Tuple = torch.tensor([14_79_79.68_75, 16_55_27.04_69, 48_46_38.59_38, 1_12_92.93_75, 58_79.65_62, 76_34.11_47] ) self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowercase_ ) ) # verify boxes lowercase__ : Tuple = torch.Size([6, 4] ) self.assertEqual(encoding["labels"][0]["boxes"].shape , lowercase_ ) lowercase__ : Dict = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] ) self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowercase_ , atol=1E-3 ) ) # verify image_id lowercase__ : List[str] = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowercase_ ) ) # verify is_crowd lowercase__ : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] 
, lowercase_ ) ) # verify class_labels lowercase__ : Any = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowercase_ ) ) # verify masks lowercase__ : str = 82_28_73 self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowercase_ ) # verify orig_size lowercase__ : Dict = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowercase_ ) ) # verify size lowercase__ : str = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowercase_ ) )
87
import warnings from typing import List, Optional, Union from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class snake_case_ ( __A ): __A : Optional[Any] = ["image_processor", "tokenizer"] __A : Tuple = "LayoutLMv3ImageProcessor" __A : List[Any] = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast") def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : str=None , **lowercase_ : Optional[Any] ) -> Optional[int]: lowercase__ : Union[str, Any] = None if "feature_extractor" in kwargs: warnings.warn( "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`" " instead." , lowercase_ , ) lowercase__ : Optional[int] = kwargs.pop("feature_extractor" ) lowercase__ : int = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("You need to specify an `image_processor`." ) if tokenizer is None: raise ValueError("You need to specify a `tokenizer`." 
) super().__init__(lowercase_ , lowercase_ ) def __call__( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase_ : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowercase_ : Union[List[List[int]], List[List[List[int]]]] = None , lowercase_ : Optional[Union[List[int], List[List[int]]]] = None , lowercase_ : bool = True , lowercase_ : Union[bool, str, PaddingStrategy] = False , lowercase_ : Union[bool, str, TruncationStrategy] = None , lowercase_ : Optional[int] = None , lowercase_ : int = 0 , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = True , lowercase_ : Optional[Union[str, TensorType]] = None , **lowercase_ : Dict , ) -> BatchEncoding: # verify input if self.image_processor.apply_ocr and (boxes is not None): raise ValueError( "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." ) if self.image_processor.apply_ocr and (word_labels is not None): raise ValueError( "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." 
) # first, apply the image processor lowercase__ : Union[str, Any] = self.image_processor(images=lowercase_ , return_tensors=lowercase_ ) # second, apply the tokenizer if text is not None and self.image_processor.apply_ocr and text_pair is None: if isinstance(lowercase_ , lowercase_ ): lowercase__ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension) lowercase__ : Any = features["words"] lowercase__ : Tuple = self.tokenizer( text=text if text is not None else features["words"] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features["boxes"] , word_labels=lowercase_ , add_special_tokens=lowercase_ , padding=lowercase_ , truncation=lowercase_ , max_length=lowercase_ , stride=lowercase_ , pad_to_multiple_of=lowercase_ , return_token_type_ids=lowercase_ , return_attention_mask=lowercase_ , return_overflowing_tokens=lowercase_ , return_special_tokens_mask=lowercase_ , return_offsets_mapping=lowercase_ , return_length=lowercase_ , verbose=lowercase_ , return_tensors=lowercase_ , **lowercase_ , ) # add pixel values lowercase__ : Optional[int] = features.pop("pixel_values" ) if return_overflowing_tokens is True: lowercase__ : Dict = self.get_overflowing_images(lowercase_ , encoded_inputs["overflow_to_sample_mapping"] ) lowercase__ : str = images return encoded_inputs def __UpperCamelCase ( self : List[Any] , lowercase_ : Tuple , lowercase_ : List[Any] ) -> Dict: # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image lowercase__ : Tuple = [] for sample_idx in overflow_to_sample_mapping: images_with_overflow.append(images[sample_idx] ) if len(lowercase_ ) != len(lowercase_ ): raise ValueError( "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got" F''' {len(lowercase_ )} and {len(lowercase_ )}''' ) return images_with_overflow def __UpperCamelCase ( self : int , *lowercase_ : Union[str, Any] , 
**lowercase_ : List[str] ) -> Union[str, Any]: return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ ) def __UpperCamelCase ( self : Union[str, Any] , *lowercase_ : str , **lowercase_ : int ) -> Dict: return self.tokenizer.decode(*lowercase_ , **lowercase_ ) @property def __UpperCamelCase ( self : Any ) -> Any: return ["input_ids", "bbox", "attention_mask", "pixel_values"] @property def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]: warnings.warn( "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowercase_ , ) return self.image_processor_class @property def __UpperCamelCase ( self : List[Any] ) -> Tuple: warnings.warn( "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowercase_ , ) return self.image_processor
87
1
from typing import Dict, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, flip_channel_order, get_resize_output_image_size, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_torch_available, is_torch_tensor, is_vision_available, logging if is_vision_available(): import PIL if is_torch_available(): import torch UpperCamelCase = logging.get_logger(__name__) class snake_case_ ( __A ): __A : Any = ["pixel_values"] def __init__( self : str , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BILINEAR , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 2_55 , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , **lowercase_ : Dict , ) -> None: super().__init__(**lowercase_ ) lowercase__ : List[str] = size if size is not None else {"shortest_edge": 2_24} lowercase__ : List[Any] = get_size_dict(lowercase_ , default_to_square=lowercase_ ) lowercase__ : Optional[Any] = crop_size if crop_size is not None else {"height": 2_56, "width": 2_56} lowercase__ : str = get_size_dict(lowercase_ , param_name="crop_size" ) lowercase__ : Tuple = do_resize lowercase__ : int = size lowercase__ : str = resample lowercase__ : Optional[Any] = do_rescale lowercase__ : Tuple = rescale_factor lowercase__ : List[str] = do_center_crop lowercase__ : List[str] = crop_size lowercase__ : Dict = do_flip_channel_order def __UpperCamelCase ( self : Optional[int] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PIL.Image.BILINEAR , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : str , ) -> np.ndarray: lowercase__ : List[str] = 
get_size_dict(lowercase_ , default_to_square=lowercase_ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` dictionary must contain the key `shortest_edge`. Got {size.keys()}''' ) lowercase__ : Optional[int] = get_resize_output_image_size(lowercase_ , size=size["shortest_edge"] , default_to_square=lowercase_ ) return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ ) def __UpperCamelCase ( self : str , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Any , ) -> np.ndarray: lowercase__ : Any = get_size_dict(lowercase_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` dictionary must contain the keys `height` and `width`. Got {size.keys()}''' ) return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_ ) def __UpperCamelCase ( self : int , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Dict , ) -> Optional[Any]: return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ ) def __UpperCamelCase ( self : Dict , lowercase_ : np.ndarray , lowercase_ : Optional[Union[str, ChannelDimension]] = None ) -> np.ndarray: return flip_channel_order(lowercase_ , data_format=lowercase_ ) def __UpperCamelCase ( self : Any , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : bool = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : ChannelDimension = ChannelDimension.FIRST , **lowercase_ : Tuple , ) -> PIL.Image.Image: lowercase__ : int = do_resize if do_resize is not None else self.do_resize lowercase__ : Any = 
resample if resample is not None else self.resample lowercase__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale lowercase__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase__ : int = ( do_flip_channel_order if do_flip_channel_order is not None else self.do_flip_channel_order ) lowercase__ : Dict = size if size is not None else self.size lowercase__ : int = get_size_dict(lowercase_ , default_to_square=lowercase_ ) lowercase__ : str = crop_size if crop_size is not None else self.crop_size lowercase__ : str = get_size_dict(lowercase_ , param_name="crop_size" ) lowercase__ : Optional[Any] = make_list_of_images(lowercase_ ) if not valid_images(lowercase_ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) # All transformations expect numpy arrays. 
lowercase__ : Tuple = [to_numpy_array(lowercase_ ) for image in images] if do_resize: lowercase__ : int = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images] if do_center_crop: lowercase__ : List[Any] = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images] if do_rescale: lowercase__ : Any = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images] # the pretrained checkpoints assume images are BGR, not RGB if do_flip_channel_order: lowercase__ : str = [self.flip_channel_order(image=lowercase_ ) for image in images] lowercase__ : str = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images] lowercase__ : Optional[int] = {"pixel_values": images} return BatchFeature(data=lowercase_ , tensor_type=lowercase_ ) def __UpperCamelCase ( self : Optional[Any] , lowercase_ : int , lowercase_ : List[Tuple] = None ) -> List[Any]: lowercase__ : Optional[int] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(lowercase_ ) != len(lowercase_ ): raise ValueError( "Make sure that you pass in as many target sizes as the batch dimension of the logits" ) if is_torch_tensor(lowercase_ ): lowercase__ : Any = target_sizes.numpy() lowercase__ : Optional[Any] = [] for idx in range(len(lowercase_ ) ): lowercase__ : Tuple = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="bilinear" , align_corners=lowercase_ ) lowercase__ : Union[str, Any] = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(lowercase_ ) else: lowercase__ : Optional[int] = logits.argmax(dim=1 ) lowercase__ : Optional[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
87
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging UpperCamelCase = logging.get_logger(__name__) if is_vision_available(): import PIL class snake_case_ ( __A ): __A : str = ["pixel_values"] def __init__( self : int , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 2_55 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = True , **lowercase_ : Union[str, Any] , ) -> None: super().__init__(**lowercase_ ) lowercase__ : Tuple = size if size is not None else {"shortest_edge": 2_24} lowercase__ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ ) lowercase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24} lowercase__ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ , param_name="crop_size" ) lowercase__ : Dict = do_resize lowercase__ : List[Any] = size lowercase__ : int = resample lowercase__ : Union[str, Any] = do_center_crop lowercase__ : Optional[int] = crop_size lowercase__ : List[str] = do_rescale lowercase__ : int = rescale_factor lowercase__ : List[Any] = do_normalize lowercase__ : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowercase__ : str = image_std if image_std is not 
None else OPENAI_CLIP_STD lowercase__ : Dict = do_convert_rgb def __UpperCamelCase ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Union[str, Any] , ) -> np.ndarray: lowercase__ : str = get_size_dict(lowercase_ , default_to_square=lowercase_ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) lowercase__ : Dict = get_resize_output_image_size(lowercase_ , size=size["shortest_edge"] , default_to_square=lowercase_ ) return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ ) def __UpperCamelCase ( self : int , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : int , ) -> np.ndarray: lowercase__ : Optional[Any] = get_size_dict(lowercase_ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_ ) def __UpperCamelCase ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[Any] , ) -> Any: return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ ) def __UpperCamelCase ( self : str , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : str , ) -> np.ndarray: return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ ) def __UpperCamelCase ( self : Optional[Any] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : int = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowercase_ : Union[str, Any] , ) -> PIL.Image.Image: lowercase__ : int = do_resize if do_resize is not None else self.do_resize lowercase__ : Dict = size if size is not None else self.size lowercase__ : List[Any] = get_size_dict(lowercase_ , param_name="size" , default_to_square=lowercase_ ) lowercase__ : Dict = resample if resample is not None else self.resample lowercase__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop lowercase__ : Dict = crop_size if crop_size is not None else self.crop_size lowercase__ : List[str] = get_size_dict(lowercase_ , param_name="crop_size" , default_to_square=lowercase_ ) 
lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale lowercase__ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize lowercase__ : int = image_mean if image_mean is not None else self.image_mean lowercase__ : List[str] = image_std if image_std is not None else self.image_std lowercase__ : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowercase__ : Union[str, Any] = make_list_of_images(lowercase_ ) if not valid_images(lowercase_ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowercase__ : Dict = [convert_to_rgb(lowercase_ ) for image in images] # All transformations expect numpy arrays. 
lowercase__ : Optional[Any] = [to_numpy_array(lowercase_ ) for image in images] if do_resize: lowercase__ : List[Any] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images] if do_center_crop: lowercase__ : int = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images] if do_rescale: lowercase__ : str = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images] if do_normalize: lowercase__ : Optional[int] = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images] lowercase__ : Optional[Any] = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images] lowercase__ : List[str] = {"pixel_values": images} return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
87
1
import unittest import numpy as np import timeout_decorator # noqa from transformers import BlenderbotSmallConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...generation.test_flax_utils import FlaxGenerationTesterMixin from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html UpperCamelCase = '''platform''' import jax import jax.numpy as jnp from transformers.models.blenderbot_small.modeling_flax_blenderbot_small import ( FlaxBlenderbotSmallForConditionalGeneration, FlaxBlenderbotSmallModel, shift_tokens_right, ) def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : str , _lowerCamelCase : str=None , _lowerCamelCase : int=None , _lowerCamelCase : str=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : str=None , _lowerCamelCase : str=None , ): if attention_mask is None: lowercase__ : Dict = np.where(input_ids != config.pad_token_id , 1 , 0) if decoder_attention_mask is None: lowercase__ : Optional[Any] = np.where(decoder_input_ids != config.pad_token_id , 1 , 0) if head_mask is None: lowercase__ : str = np.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: lowercase__ : Dict = np.ones((config.decoder_layers, config.decoder_attention_heads)) if cross_attn_head_mask is None: lowercase__ : int = np.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } class snake_case_ : def __init__( self : Dict , lowercase_ : Any , lowercase_ : List[str]=13 , lowercase_ : List[Any]=7 , lowercase_ : int=True , lowercase_ : 
Any=False , lowercase_ : int=99 , lowercase_ : Tuple=16 , lowercase_ : Optional[Any]=2 , lowercase_ : Union[str, Any]=4 , lowercase_ : Dict=4 , lowercase_ : Optional[Any]="gelu" , lowercase_ : Optional[int]=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : Optional[Any]=32 , lowercase_ : Tuple=2 , lowercase_ : Any=1 , lowercase_ : List[str]=0 , lowercase_ : Union[str, Any]=0.02 , ) -> Dict: lowercase__ : Optional[int] = parent lowercase__ : str = batch_size lowercase__ : str = seq_length lowercase__ : Tuple = is_training lowercase__ : Optional[Any] = use_labels lowercase__ : List[Any] = vocab_size lowercase__ : str = hidden_size lowercase__ : Dict = num_hidden_layers lowercase__ : Any = num_attention_heads lowercase__ : Dict = intermediate_size lowercase__ : Tuple = hidden_act lowercase__ : List[str] = hidden_dropout_prob lowercase__ : Union[str, Any] = attention_probs_dropout_prob lowercase__ : List[Any] = max_position_embeddings lowercase__ : Optional[int] = eos_token_id lowercase__ : List[Any] = pad_token_id lowercase__ : Union[str, Any] = bos_token_id lowercase__ : Optional[int] = initializer_range def __UpperCamelCase ( self : Optional[Any] ) -> Tuple: lowercase__ : Any = np.clip(ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) , 3 , self.vocab_size ) lowercase__ : Tuple = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1) , dtype=np.intaa )) , -1 ) lowercase__ : Tuple = shift_tokens_right(lowercase_ , 1 , 2 ) lowercase__ : Union[str, Any] = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , 
eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , initializer_range=self.initializer_range , use_cache=lowercase_ , ) lowercase__ : List[Any] = prepare_blenderbot_inputs_dict(lowercase_ , lowercase_ , lowercase_ ) return config, inputs_dict def __UpperCamelCase ( self : Tuple ) -> int: lowercase__ , lowercase__ : List[Any] = self.prepare_config_and_inputs() return config, inputs_dict def __UpperCamelCase ( self : Any , lowercase_ : List[Any] , lowercase_ : Any , lowercase_ : List[Any] ) -> List[Any]: lowercase__ : Union[str, Any] = 20 lowercase__ : List[Any] = model_class_name(lowercase_ ) lowercase__ : Optional[int] = model.encode(inputs_dict["input_ids"] ) lowercase__ , lowercase__ : Optional[int] = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) lowercase__ : Optional[int] = model.init_cache(decoder_input_ids.shape[0] , lowercase_ , lowercase_ ) lowercase__ : Tuple = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" ) lowercase__ : List[str] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowercase__ : int = model.decode( decoder_input_ids[:, :-1] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=lowercase_ , decoder_position_ids=lowercase_ , ) lowercase__ : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) lowercase__ : Dict = model.decode( decoder_input_ids[:, -1:] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase_ , ) lowercase__ : Union[str, Any] = model.decode(lowercase_ , lowercase_ ) lowercase__ : Tuple = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) def __UpperCamelCase ( self : Any , lowercase_ : Union[str, Any] , lowercase_ 
: Dict , lowercase_ : Any ) -> Union[str, Any]: lowercase__ : Optional[Any] = 20 lowercase__ : List[str] = model_class_name(lowercase_ ) lowercase__ : Any = model.encode(inputs_dict["input_ids"] ) lowercase__ , lowercase__ : List[str] = ( inputs_dict["decoder_input_ids"], inputs_dict["decoder_attention_mask"], ) lowercase__ : Tuple = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] , axis=-1 , ) lowercase__ : List[str] = model.init_cache(decoder_input_ids.shape[0] , lowercase_ , lowercase_ ) lowercase__ : Tuple = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , ) lowercase__ : str = model.decode( decoder_input_ids[:, :-1] , lowercase_ , decoder_attention_mask=lowercase_ , past_key_values=lowercase_ , decoder_position_ids=lowercase_ , ) lowercase__ : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" ) lowercase__ : int = model.decode( decoder_input_ids[:, -1:] , lowercase_ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase_ , decoder_position_ids=lowercase_ , ) lowercase__ : int = model.decode(lowercase_ , lowercase_ , decoder_attention_mask=lowercase_ ) lowercase__ : Dict = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 , msg=F'''Max diff is {diff}''' ) @require_flax class snake_case_ ( unittest.TestCase ): __A : Dict = 99 def __UpperCamelCase ( self : Any ) -> Tuple: lowercase__ : Tuple = np.array( [ [71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 82, 2], [5, 97, 17, 39, 94, 40, 2], [76, 83, 94, 25, 70, 78, 2], [87, 59, 41, 35, 48, 66, 2], [55, 13, 16, 58, 5, 2, 1], # note padding [64, 27, 31, 51, 12, 75, 2], [52, 64, 86, 17, 83, 39, 2], [48, 61, 9, 24, 71, 82, 2], [26, 1, 60, 48, 22, 13, 2], [21, 5, 62, 28, 14, 76, 2], [45, 98, 
37, 86, 59, 48, 2], [70, 70, 50, 9, 28, 0, 2], ] , dtype=np.intaa , ) lowercase__ : Tuple = input_ids.shape[0] lowercase__ : Union[str, Any] = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=24 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=32 , decoder_ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size def __UpperCamelCase ( self : str ) -> Tuple: lowercase__ , lowercase__ , lowercase__ : Optional[int] = self._get_config_and_data() lowercase__ : Tuple = FlaxBlenderbotSmallForConditionalGeneration(lowercase_ ) lowercase__ : List[str] = lm_model(input_ids=lowercase_ ) lowercase__ : int = (batch_size, input_ids.shape[1], config.vocab_size) self.assertEqual(outputs["logits"].shape , lowercase_ ) def __UpperCamelCase ( self : Optional[Any] ) -> str: lowercase__ : List[Any] = BlenderbotSmallConfig( vocab_size=self.vocab_size , d_model=14 , encoder_layers=2 , decoder_layers=2 , encoder_attention_heads=2 , decoder_attention_heads=2 , encoder_ffn_dim=8 , decoder_ffn_dim=8 , max_position_embeddings=48 , ) lowercase__ : Dict = FlaxBlenderbotSmallForConditionalGeneration(lowercase_ ) lowercase__ : Optional[int] = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]] , dtype=np.intaa ) lowercase__ : Union[str, Any] = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]] , dtype=np.intaa ) lowercase__ : Union[str, Any] = lm_model(input_ids=lowercase_ , decoder_input_ids=lowercase_ ) lowercase__ : Optional[int] = (*summary.shape, config.vocab_size) self.assertEqual(outputs["logits"].shape , lowercase_ ) def __UpperCamelCase ( self : List[Any] ) -> str: lowercase__ : List[Any] = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]] , dtype=np.intaa ) lowercase__ : int = shift_tokens_right(lowercase_ , 1 , 2 ) lowercase__ : Optional[int] = np.equal(lowercase_ , 1 ).astype(np.floataa ).sum() lowercase__ : 
Any = np.equal(lowercase_ , 1 ).astype(np.floataa ).sum() self.assertEqual(shifted.shape , input_ids.shape ) self.assertEqual(lowercase_ , n_pad_before - 1 ) self.assertTrue(np.equal(shifted[:, 0] , 2 ).all() ) @require_flax class snake_case_ ( __A ,unittest.TestCase ,__A ): __A : List[str] = True __A : List[str] = ( ( FlaxBlenderbotSmallModel, FlaxBlenderbotSmallForConditionalGeneration, ) if is_flax_available() else () ) __A : str = (FlaxBlenderbotSmallForConditionalGeneration,) if is_flax_available() else () def __UpperCamelCase ( self : Any ) -> List[str]: lowercase__ : Dict = FlaxBlenderbotSmallModelTester(self ) def __UpperCamelCase ( self : str ) -> List[str]: lowercase__ , lowercase__ : Any = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(lowercase_ , lowercase_ , lowercase_ ) def __UpperCamelCase ( self : Any ) -> List[str]: lowercase__ , lowercase__ : Dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(lowercase_ , lowercase_ , lowercase_ ) def __UpperCamelCase ( self : Tuple ) -> List[str]: lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowercase__ : Optional[int] = self._prepare_for_class(lowercase_ , lowercase_ ) lowercase__ : Union[str, Any] = model_class(lowercase_ ) @jax.jit def encode_jitted(lowercase_ : Any , lowercase_ : List[Any]=None , **lowercase_ : Dict ): return model.encode(input_ids=lowercase_ , attention_mask=lowercase_ ) with self.subTest("JIT Enabled" ): lowercase__ : List[str] = encode_jitted(**lowercase_ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): lowercase__ : Any = encode_jitted(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) ) for jitted_output, output in 
zip(lowercase_ , lowercase_ ): self.assertEqual(jitted_output.shape , output.shape ) def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]: lowercase__ , lowercase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): lowercase__ : int = model_class(lowercase_ ) lowercase__ : List[str] = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] ) lowercase__ : int = { "decoder_input_ids": inputs_dict["decoder_input_ids"], "decoder_attention_mask": inputs_dict["decoder_attention_mask"], "encoder_outputs": encoder_outputs, } @jax.jit def decode_jitted(lowercase_ : Any , lowercase_ : int , lowercase_ : Optional[int] ): return model.decode( decoder_input_ids=lowercase_ , decoder_attention_mask=lowercase_ , encoder_outputs=lowercase_ , ) with self.subTest("JIT Enabled" ): lowercase__ : Any = decode_jitted(**lowercase_ ).to_tuple() with self.subTest("JIT Disabled" ): with jax.disable_jit(): lowercase__ : Tuple = decode_jitted(**lowercase_ ).to_tuple() self.assertEqual(len(lowercase_ ) , len(lowercase_ ) ) for jitted_output, output in zip(lowercase_ , lowercase_ ): self.assertEqual(jitted_output.shape , output.shape ) @slow def __UpperCamelCase ( self : Dict ) -> int: for model_class_name in self.all_model_classes: lowercase__ : List[Any] = model_class_name.from_pretrained("facebook/blenderbot_small-90M" ) # FlaxBlenderbotForSequenceClassification expects eos token in input_ids lowercase__ : Optional[Any] = np.ones((1, 1) ) * model.config.eos_token_id lowercase__ : str = model(lowercase_ ) self.assertIsNotNone(lowercase_ )
87
# Lazy-loading package stub for GPT-SW3: the tokenizer is only importable
# when the optional `sentencepiece` dependency is installed.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

# Map of submodule name -> public names, consumed by _LazyModule below.
# (The mangled original assigned this list to a throwaway name and then
# passed an undefined `_import_structure` to _LazyModule -> NameError.)
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece missing: expose nothing rather than fail at import time.
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    # Static type checkers see the real symbol directly.
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    # Install the lazy module so attribute access triggers the real import.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Map of canonical checkpoint name -> hosted config file.
# Kept under the original public name so existing imports still resolve.
UpperCamelCase = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}


class snake_case_(PretrainedConfig):
    """Configuration class for the CTRL model.

    Instantiating with the defaults below reproduces the architecture of the
    original ``ctrl`` checkpoint.  Extra keyword arguments are forwarded to
    :class:`PretrainedConfig`.
    """

    model_type = "ctrl"
    # `past_key_values` is an internal generation cache, not a comparable output.
    keys_to_ignore_at_inference = ["past_key_values"]
    # Translate the framework-wide attribute names to CTRL's historical ones.
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "hidden_size": "n_embd",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size: int = 246_534,
        n_positions: int = 256,
        n_embd: int = 1280,
        dff: int = 8192,
        n_layer: int = 48,
        n_head: int = 16,
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-6,
        initializer_range: float = 0.02,
        use_cache: bool = True,
        **kwargs,
    ) -> None:
        """Store the CTRL hyper-parameters.

        Args:
            vocab_size: size of the token vocabulary.
            n_positions: maximum sequence length the model can attend over.
            n_embd: hidden/embedding dimensionality.
            dff: inner dimensionality of the feed-forward blocks.
            n_layer: number of transformer layers.
            n_head: number of attention heads per layer.
            resid_pdrop: dropout probability on residual connections.
            embd_pdrop: dropout probability on the embeddings.
            layer_norm_epsilon: epsilon used by the layer-norm layers.
            initializer_range: stddev for weight initialization.
            use_cache: whether the model returns past key/value states.
        """
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.dff = dff
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        super().__init__(**kwargs)
87
# Project Euler 145: count "reversible" numbers n (no leading zero in n or
# reverse(n)) such that every digit of n + reverse(n) is odd.
# Digits are grouped by parity: each mirrored digit pair must mix parities
# so that the pair sum is odd.
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list, length: int) -> int:
    """Count ``length``-digit reversible numbers by filling digit pairs inside-out.

    Args:
        remaining_length: how many digit positions are still unassigned.
        remainder: carry accumulated from the already-fixed inner digit pairs.
        digits: scratch buffer holding the digits chosen so far (mutated in place).
        length: total number of digits of the candidate numbers.

    Returns:
        Number of reversible numbers consistent with the choices made so far.
    """
    if remaining_length == 0:
        # Leading zeroes are not allowed in n or reverse(n).
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        # Verify every digit of n + reverse(n) is odd, propagating the carry
        # outward through the remaining mirrored pairs.
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Odd length: the middle digit is added to itself (always even), so
        # the incoming carry must be odd to make the sum's middle digit odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digit_a in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit_a
        # The partner digit must make the pair sum (plus incoming carry) odd.
        if (remainder + digit_a) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit_b in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit_b
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digit_a + digit_b) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Return how many reversible numbers exist below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
87
1
# Smoke tests: run accelerate's bundled CPU test entry points through
# debug_launcher, which launches them in-process with the debug config.
import unittest

from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script


@require_cpu
class snake_case_(unittest.TestCase):
    # NOTE(review): both test methods share the name __UpperCamelCase, so the
    # second definition replaces the first and only the test_ops launcher is
    # ever collected — confirm whether distinct test names were intended.
    def __UpperCamelCase(self) -> None:
        # Launch the standard accelerate CPU test script in-process.
        debug_launcher(test_script.main)

    def __UpperCamelCase(self) -> None:
        # Launch the accelerate ops test suite in-process.
        debug_launcher(test_ops.main)
87
import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets UpperCamelCase = '''\ @inproceedings{snover-etal-2006-study, title = "A Study of Translation Edit Rate with Targeted Human Annotation", author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John", booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers", month = aug # " 8-12", year = "2006", address = "Cambridge, Massachusetts, USA", publisher = "Association for Machine Translation in the Americas", url = "https://aclanthology.org/2006.amta-papers.25", pages = "223--231", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' UpperCamelCase = '''\ TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu (https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found here: https://github.com/jhclark/tercom. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information. 
''' UpperCamelCase = ''' Produces TER scores alongside the number of edits and reference length. Args: predictions (list of str): The system stream (a sequence of segments). references (list of list of str): A list of one or more reference streams (each a sequence of segments). normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters, as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana. Only applies if `normalized = True`. Defaults to `False`. case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`. Returns: \'score\' (float): TER score (num_edits / sum_ref_lengths * 100) \'num_edits\' (int): The cumulative number of edits \'ref_length\' (float): The cumulative average reference length Examples: Example 1: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... case_sensitive=True) >>> print(results) {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0} Example 2: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... 
references=references, ... case_sensitive=True) >>> print(results) {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0} Example 3: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... normalized=True, ... case_sensitive=True) >>> print(results) {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5} Example 4: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0} Example 5: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... 
case_sensitive=False) >>> print(results) {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION ) class snake_case_ ( datasets.Metric ): def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple: if version.parse(scb.__version__ ) < version.parse("1.4.12" ): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" "You can install it with `pip install \"sacrebleu>=1.4.12\"`." ) return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ), } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[ "https://github.com/jhclark/tercom", ] , ) def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , ) -> Any: lowercase__ : Optional[int] = len(references[0] ) if any(len(lowercase_ ) != references_per_prediction for refs in references ): raise ValueError("Sacrebleu requires the same number of references for each prediction" ) lowercase__ : Union[str, Any] = [[refs[i] for refs in references] for i in range(lowercase_ )] lowercase__ : str = TER( normalized=lowercase_ , no_punct=lowercase_ , asian_support=lowercase_ , case_sensitive=lowercase_ , ) lowercase__ : List[str] = sb_ter.corpus_score(lowercase_ , lowercase_ ) return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
87
1
def miller_rabin(n: int, allow_probable: bool = False) -> bool:
    """Miller-Rabin primality test, deterministic for n < 3,317,044,064,679,887,385,961,981.

    Args:
        n: integer to test for primality.
        allow_probable: when True, numbers above the deterministic bound are
            tested with all 13 witnesses anyway; a True result is then only a
            *probable* prime.

    Returns:
        True if ``n`` is (probably) prime, False if it is definitely composite.

    Raises:
        ValueError: if ``n`` exceeds the deterministic bound and
            ``allow_probable`` is False.
    """
    if n == 2:
        return True
    if not n % 2 or n < 2:
        return False
    if n > 5 and n % 10 not in (1, 3, 7, 9):  # can quickly check last digit
        return False
    if n > 3_317_044_064_679_887_385_961_981 and not allow_probable:
        raise ValueError(
            "Warning: upper bound of deterministic test is exceeded. "
            "Pass allow_probable=True to allow probabilistic test. "
            "A return value of True indicates a probable prime."
        )
    # bounds[i] is the smallest composite that fools the first i+1 prime
    # witnesses (OEIS A014233); entries of 1 are placeholders where the bound
    # does not improve over the next row.
    bounds = [
        2_047,
        1_373_653,
        25_326_001,
        3_215_031_751,
        2_152_302_898_747,
        3_474_749_660_383,
        341_550_071_728_321,
        1,
        3_825_123_056_546_413_051,
        1,
        1,
        318_665_857_834_031_151_167_461,
        3_317_044_064_679_887_385_961_981,
    ]
    primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41]
    # Fallback for allow_probable=True with n beyond the last bound: use every
    # witness (the original left plist unbound in that case).
    plist = primes
    for idx, bound in enumerate(bounds, 1):
        if n < bound:  # then we have our last prime to check
            plist = primes[:idx]
            break
    # Express n - 1 as d * 2**s with d odd.
    d, s = n - 1, 0
    while d % 2 == 0:
        d //= 2
        s += 1
    for prime in plist:
        pr = False
        for r in range(s):
            m = pow(prime, d * 2**r, n)
            # n passes this witness if prime**d == 1 (mod n) or the squaring
            # chain hits -1 (mod n) at some step.
            if (r == 0 and m == 1) or ((m + 1) % n == 0):
                pr = True  # this loop will not determine compositeness
                break
        if pr:
            continue
        # No witness condition held: n is definitely composite.
        return False
    return True


def test_miller_rabin() -> None:
    """Spot-check miller_rabin on composites/primes straddling each bound."""
    assert not miller_rabin(561)
    assert miller_rabin(563)
    # 2047
    assert not miller_rabin(838_201)
    assert miller_rabin(838_207)
    # 1_373_653
    assert not miller_rabin(17_316_001)
    assert miller_rabin(17_316_017)
    # 25_326_001
    assert not miller_rabin(3_078_386_641)
    assert miller_rabin(3_078_386_653)
    # 3_215_031_751
    assert not miller_rabin(1_713_045_574_801)
    assert miller_rabin(1_713_045_574_819)
    # 2_152_302_898_747
    assert not miller_rabin(2_779_799_728_307)
    assert miller_rabin(2_779_799_728_327)
    # 3_474_749_660_383
    assert not miller_rabin(113_850_023_909_441)
    assert miller_rabin(113_850_023_909_527)
    # 341_550_071_728_321
    assert not miller_rabin(1_275_041_018_848_804_351)
    assert miller_rabin(1_275_041_018_848_804_391)
    # 3_825_123_056_546_413_051
    assert not miller_rabin(79_666_464_458_507_787_791_867)
    assert miller_rabin(79_666_464_458_507_787_791_951)
    # 318_665_857_834_031_151_167_461
    assert not miller_rabin(552_840_677_446_647_897_660_333)
    assert miller_rabin(552_840_677_446_647_897_660_359)
    # 3_317_044_064_679_887_385_961_981
    # upper limit for deterministic test


if __name__ == "__main__":
    test_miller_rabin()
87
def perfect_cube(n: int) -> bool:
    """Return True iff ``n`` is a perfect cube, i.e. n == k**3 for some integer k.

    Negative inputs are handled via symmetry (e.g. -27 == (-3)**3).
    """
    # The float cube root is only approximate (343 ** (1 / 3) == 6.999...),
    # so comparing val * val * val == n directly yields false negatives.
    # Round to the nearest integer and verify exactly; checking the two
    # neighbours guards against larger float error for very big inputs.
    target = abs(n)
    root = round(target ** (1 / 3))
    return any(k >= 0 and k**3 == target for k in (root - 1, root, root + 1))


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
87
1
# Flax "AutoModel" registry: per-task OrderedDicts mapping model-type strings to
# Flax model class names, wrapped into lazy config->class mappings and AutoModel
# facade classes.
# NOTE(review): identifiers here appear machine-mangled and the module cannot run
# as written — every mapping and facade is rebound to the single name
# `UpperCamelCase`, the `_LazyAutoMapping(...)` calls reference
# `FLAX_MODEL_*_MAPPING_NAMES` constants that are never defined here, the class
# attributes reference undefined `FLAX_MODEL_*_MAPPING` names, all facade classes
# share the name `snake_case_`, and typing names (`List`, `Tuple`, `Dict`,
# `Optional`, `Union`) are not imported.  Restore the original identifiers from
# upstream `transformers.models.auto.modeling_flax_auto` before relying on this.
from collections import OrderedDict

from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES


UpperCamelCase = logging.get_logger(__name__)

UpperCamelCase = OrderedDict(
    [
        # Base model mapping
        ('''albert''', '''FlaxAlbertModel'''),
        ('''bart''', '''FlaxBartModel'''),
        ('''beit''', '''FlaxBeitModel'''),
        ('''bert''', '''FlaxBertModel'''),
        ('''big_bird''', '''FlaxBigBirdModel'''),
        ('''blenderbot''', '''FlaxBlenderbotModel'''),
        ('''blenderbot-small''', '''FlaxBlenderbotSmallModel'''),
        ('''clip''', '''FlaxCLIPModel'''),
        ('''distilbert''', '''FlaxDistilBertModel'''),
        ('''electra''', '''FlaxElectraModel'''),
        ('''gpt-sw3''', '''FlaxGPT2Model'''),
        ('''gpt2''', '''FlaxGPT2Model'''),
        ('''gpt_neo''', '''FlaxGPTNeoModel'''),
        ('''gptj''', '''FlaxGPTJModel'''),
        ('''longt5''', '''FlaxLongT5Model'''),
        ('''marian''', '''FlaxMarianModel'''),
        ('''mbart''', '''FlaxMBartModel'''),
        ('''mt5''', '''FlaxMT5Model'''),
        ('''opt''', '''FlaxOPTModel'''),
        ('''pegasus''', '''FlaxPegasusModel'''),
        ('''regnet''', '''FlaxRegNetModel'''),
        ('''resnet''', '''FlaxResNetModel'''),
        ('''roberta''', '''FlaxRobertaModel'''),
        ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormModel'''),
        ('''roformer''', '''FlaxRoFormerModel'''),
        ('''t5''', '''FlaxT5Model'''),
        ('''vision-text-dual-encoder''', '''FlaxVisionTextDualEncoderModel'''),
        ('''vit''', '''FlaxViTModel'''),
        ('''wav2vec2''', '''FlaxWav2Vec2Model'''),
        ('''whisper''', '''FlaxWhisperModel'''),
        ('''xglm''', '''FlaxXGLMModel'''),
        ('''xlm-roberta''', '''FlaxXLMRobertaModel'''),
    ]
)

UpperCamelCase = OrderedDict(
    [
        # Model for pre-training mapping
        ('''albert''', '''FlaxAlbertForPreTraining'''),
        ('''bart''', '''FlaxBartForConditionalGeneration'''),
        ('''bert''', '''FlaxBertForPreTraining'''),
        ('''big_bird''', '''FlaxBigBirdForPreTraining'''),
        ('''electra''', '''FlaxElectraForPreTraining'''),
        ('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
        ('''mbart''', '''FlaxMBartForConditionalGeneration'''),
        ('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
        ('''roberta''', '''FlaxRobertaForMaskedLM'''),
        ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
        ('''roformer''', '''FlaxRoFormerForMaskedLM'''),
        ('''t5''', '''FlaxT5ForConditionalGeneration'''),
        ('''wav2vec2''', '''FlaxWav2Vec2ForPreTraining'''),
        ('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
        ('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
    ]
)

UpperCamelCase = OrderedDict(
    [
        # Model for Masked LM mapping
        ('''albert''', '''FlaxAlbertForMaskedLM'''),
        ('''bart''', '''FlaxBartForConditionalGeneration'''),
        ('''bert''', '''FlaxBertForMaskedLM'''),
        ('''big_bird''', '''FlaxBigBirdForMaskedLM'''),
        ('''distilbert''', '''FlaxDistilBertForMaskedLM'''),
        ('''electra''', '''FlaxElectraForMaskedLM'''),
        ('''mbart''', '''FlaxMBartForConditionalGeneration'''),
        ('''roberta''', '''FlaxRobertaForMaskedLM'''),
        ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMaskedLM'''),
        ('''roformer''', '''FlaxRoFormerForMaskedLM'''),
        ('''xlm-roberta''', '''FlaxXLMRobertaForMaskedLM'''),
    ]
)

UpperCamelCase = OrderedDict(
    [
        # Model for Seq2Seq Causal LM mapping
        ('''bart''', '''FlaxBartForConditionalGeneration'''),
        ('''blenderbot''', '''FlaxBlenderbotForConditionalGeneration'''),
        ('''blenderbot-small''', '''FlaxBlenderbotSmallForConditionalGeneration'''),
        ('''encoder-decoder''', '''FlaxEncoderDecoderModel'''),
        ('''longt5''', '''FlaxLongT5ForConditionalGeneration'''),
        ('''marian''', '''FlaxMarianMTModel'''),
        ('''mbart''', '''FlaxMBartForConditionalGeneration'''),
        ('''mt5''', '''FlaxMT5ForConditionalGeneration'''),
        ('''pegasus''', '''FlaxPegasusForConditionalGeneration'''),
        ('''t5''', '''FlaxT5ForConditionalGeneration'''),
    ]
)

UpperCamelCase = OrderedDict(
    [
        # Model for Image-classsification
        ('''beit''', '''FlaxBeitForImageClassification'''),
        ('''regnet''', '''FlaxRegNetForImageClassification'''),
        ('''resnet''', '''FlaxResNetForImageClassification'''),
        ('''vit''', '''FlaxViTForImageClassification'''),
    ]
)

UpperCamelCase = OrderedDict(
    [
        ('''vision-encoder-decoder''', '''FlaxVisionEncoderDecoderModel'''),
    ]
)

UpperCamelCase = OrderedDict(
    [
        # Model for Causal LM mapping
        ('''bart''', '''FlaxBartForCausalLM'''),
        ('''bert''', '''FlaxBertForCausalLM'''),
        ('''big_bird''', '''FlaxBigBirdForCausalLM'''),
        ('''electra''', '''FlaxElectraForCausalLM'''),
        ('''gpt-sw3''', '''FlaxGPT2LMHeadModel'''),
        ('''gpt2''', '''FlaxGPT2LMHeadModel'''),
        ('''gpt_neo''', '''FlaxGPTNeoForCausalLM'''),
        ('''gptj''', '''FlaxGPTJForCausalLM'''),
        ('''opt''', '''FlaxOPTForCausalLM'''),
        ('''roberta''', '''FlaxRobertaForCausalLM'''),
        ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForCausalLM'''),
        ('''xglm''', '''FlaxXGLMForCausalLM'''),
        ('''xlm-roberta''', '''FlaxXLMRobertaForCausalLM'''),
    ]
)

UpperCamelCase = OrderedDict(
    [
        # Model for Sequence Classification mapping
        ('''albert''', '''FlaxAlbertForSequenceClassification'''),
        ('''bart''', '''FlaxBartForSequenceClassification'''),
        ('''bert''', '''FlaxBertForSequenceClassification'''),
        ('''big_bird''', '''FlaxBigBirdForSequenceClassification'''),
        ('''distilbert''', '''FlaxDistilBertForSequenceClassification'''),
        ('''electra''', '''FlaxElectraForSequenceClassification'''),
        ('''mbart''', '''FlaxMBartForSequenceClassification'''),
        ('''roberta''', '''FlaxRobertaForSequenceClassification'''),
        ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForSequenceClassification'''),
        ('''roformer''', '''FlaxRoFormerForSequenceClassification'''),
        ('''xlm-roberta''', '''FlaxXLMRobertaForSequenceClassification'''),
    ]
)

UpperCamelCase = OrderedDict(
    [
        # Model for Question Answering mapping
        ('''albert''', '''FlaxAlbertForQuestionAnswering'''),
        ('''bart''', '''FlaxBartForQuestionAnswering'''),
        ('''bert''', '''FlaxBertForQuestionAnswering'''),
        ('''big_bird''', '''FlaxBigBirdForQuestionAnswering'''),
        ('''distilbert''', '''FlaxDistilBertForQuestionAnswering'''),
        ('''electra''', '''FlaxElectraForQuestionAnswering'''),
        ('''mbart''', '''FlaxMBartForQuestionAnswering'''),
        ('''roberta''', '''FlaxRobertaForQuestionAnswering'''),
        ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForQuestionAnswering'''),
        ('''roformer''', '''FlaxRoFormerForQuestionAnswering'''),
        ('''xlm-roberta''', '''FlaxXLMRobertaForQuestionAnswering'''),
    ]
)

UpperCamelCase = OrderedDict(
    [
        # Model for Token Classification mapping
        ('''albert''', '''FlaxAlbertForTokenClassification'''),
        ('''bert''', '''FlaxBertForTokenClassification'''),
        ('''big_bird''', '''FlaxBigBirdForTokenClassification'''),
        ('''distilbert''', '''FlaxDistilBertForTokenClassification'''),
        ('''electra''', '''FlaxElectraForTokenClassification'''),
        ('''roberta''', '''FlaxRobertaForTokenClassification'''),
        ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForTokenClassification'''),
        ('''roformer''', '''FlaxRoFormerForTokenClassification'''),
        ('''xlm-roberta''', '''FlaxXLMRobertaForTokenClassification'''),
    ]
)

UpperCamelCase = OrderedDict(
    [
        # Model for Multiple Choice mapping
        ('''albert''', '''FlaxAlbertForMultipleChoice'''),
        ('''bert''', '''FlaxBertForMultipleChoice'''),
        ('''big_bird''', '''FlaxBigBirdForMultipleChoice'''),
        ('''distilbert''', '''FlaxDistilBertForMultipleChoice'''),
        ('''electra''', '''FlaxElectraForMultipleChoice'''),
        ('''roberta''', '''FlaxRobertaForMultipleChoice'''),
        ('''roberta-prelayernorm''', '''FlaxRobertaPreLayerNormForMultipleChoice'''),
        ('''roformer''', '''FlaxRoFormerForMultipleChoice'''),
        ('''xlm-roberta''', '''FlaxXLMRobertaForMultipleChoice'''),
    ]
)

UpperCamelCase = OrderedDict(
    [
        ('''bert''', '''FlaxBertForNextSentencePrediction'''),
    ]
)

UpperCamelCase = OrderedDict(
    [
        ('''speech-encoder-decoder''', '''FlaxSpeechEncoderDecoderModel'''),
        ('''whisper''', '''FlaxWhisperForConditionalGeneration'''),
    ]
)

UpperCamelCase = OrderedDict(
    [
        ('''whisper''', '''FlaxWhisperForAudioClassification'''),
    ]
)

# NOTE(review): the right-hand names below (FLAX_MODEL_*_MAPPING_NAMES) are never
# defined in this module — presumably they were the original names of the
# OrderedDicts above before mangling; confirm against upstream.
UpperCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
UpperCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
UpperCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
UpperCamelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
UpperCamelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
UpperCamelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
UpperCamelCase = _LazyAutoMapping(
    CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)


# Facade classes: each binds one lazy mapping and is specialised by
# auto_class_update.  NOTE(review): `__A` below looks like a mangled
# `_model_mapping` attribute; the FLAX_MODEL_* values and the FlaxAutoModel*
# names passed to auto_class_update are undefined in this module as written.
class snake_case_ ( _BaseAutoModelClass ):
    __A : List[str] = FLAX_MODEL_MAPPING


UpperCamelCase = auto_class_update(FlaxAutoModel)


class snake_case_ ( _BaseAutoModelClass ):
    __A : Tuple = FLAX_MODEL_FOR_PRETRAINING_MAPPING


UpperCamelCase = auto_class_update(FlaxAutoModelForPreTraining, head_doc='''pretraining''')


class snake_case_ ( _BaseAutoModelClass ):
    __A : int = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING


UpperCamelCase = auto_class_update(FlaxAutoModelForCausalLM, head_doc='''causal language modeling''')


class snake_case_ ( _BaseAutoModelClass ):
    __A : Union[str, Any] = FLAX_MODEL_FOR_MASKED_LM_MAPPING


UpperCamelCase = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='''masked language modeling''')


class snake_case_ ( _BaseAutoModelClass ):
    __A : str = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING


UpperCamelCase = auto_class_update(
    FlaxAutoModelForSeqaSeqLM, head_doc='''sequence-to-sequence language modeling''', checkpoint_for_example='''t5-base'''
)


class snake_case_ ( _BaseAutoModelClass ):
    __A : Optional[int] = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING


UpperCamelCase = auto_class_update(
    FlaxAutoModelForSequenceClassification, head_doc='''sequence classification'''
)


class snake_case_ ( _BaseAutoModelClass ):
    __A : Optional[int] = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING


UpperCamelCase = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='''question answering''')


class snake_case_ ( _BaseAutoModelClass ):
    __A : Union[str, Any] = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING


UpperCamelCase = auto_class_update(
    FlaxAutoModelForTokenClassification, head_doc='''token classification'''
)


class snake_case_ ( _BaseAutoModelClass ):
    __A : int = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING


UpperCamelCase = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='''multiple choice''')


class snake_case_ ( _BaseAutoModelClass ):
    __A : str = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING


UpperCamelCase = auto_class_update(
    FlaxAutoModelForNextSentencePrediction, head_doc='''next sentence prediction'''
)


class snake_case_ ( _BaseAutoModelClass ):
    __A : Optional[Any] = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING


UpperCamelCase = auto_class_update(
    FlaxAutoModelForImageClassification, head_doc='''image classification'''
)


class snake_case_ ( _BaseAutoModelClass ):
    __A : List[str] = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING


UpperCamelCase = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='''vision-to-text modeling''')


class snake_case_ ( _BaseAutoModelClass ):
    __A : Dict = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING


UpperCamelCase = auto_class_update(
    FlaxAutoModelForSpeechSeqaSeq, head_doc='''sequence-to-sequence speech-to-text modeling'''
)
87
from PIL import Image


def lowercase_(img: Image, level: int) -> Image:
    """Change the contrast of *img* and return the adjusted copy.

    Args:
        img: source PIL image (any mode whose bands are 0-255 ints).
        level: contrast level in [-255, 255]; positive increases contrast.

    Returns:
        A new image with every band value remapped around the midpoint 128.

    The original block had two defects fixed here: both parameters were named
    `_lowerCamelCase` (a SyntaxError — duplicate argument), and `img.point` was
    handed an undefined name instead of the inner `contrast` function.
    """
    # Standard contrast-correction factor for 8-bit channels.
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """Fundamental contrast shift of a single 0-255 channel value."""
        return int(128 + factor * (c - 128))

    # Image.point applies `contrast` to every band value of every pixel.
    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = lowercase_(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
87
1
from bisect import bisect
from itertools import accumulate


def lowercase_(vl, wt, w, n):
    """Greedy fractional-knapsack: maximum value packable into capacity *w*.

    Args:
        vl: list of item values.
        wt: list of item weights (parallel to ``vl``).
        w:  knapsack capacity.
        n:  number of items (``len(vl)``).

    Returns:
        The maximum achievable value, taking a fraction of at most one item.

    The original block was unrunnable: all four parameters shared the name
    `_lowerCamelCase` (SyntaxError), the sort key lambda's parameter was
    likewise mangled so its body referenced an undefined `x`, and `reverse=`
    was bound to a parameter instead of True.  Reconstructed here: sort by
    value/weight ratio descending, take whole items while they fit, then a
    fraction of the first item that does not.
    """
    # Sort items by value-per-weight ratio, best first.
    ranked = sorted(zip(vl, wt), key=lambda pair: pair[0] / pair[1], reverse=True)
    vl, wt = [item[0] for item in ranked], [item[1] for item in ranked]
    # acc[i] = total weight of the i+1 best-ratio items.
    acc = list(accumulate(wt))
    # k = number of whole items that fit within capacity w.
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
87
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    """LRU cache built on a deque (most recent on the left) plus a membership set.

    Reconstructed: the original block bound the TypeVar to the wrong name
    (leaving ``T`` undefined), named the class ``snake_case_`` while its own
    methods and the demo referenced ``LRUCache``, and assigned the requested
    capacity to a throwaway local instead of ``LRUCache._MAX_CAPACITY``.

    NOTE: capacity is stored on the CLASS (original design), so all instances
    share the most recently configured capacity.
    """

    dq_store: deque[T]  # Cache store of keys, most-recently-used first
    key_reference: set[T]  # References of the keys in cache (O(1) membership)
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Create an empty cache; n == 0 means effectively unbounded."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Touch key *x*: move it to the front, evicting the LRU key if full."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                # Evict least-recently-used (rightmost) entry.
                evicted = self.dq_store.pop()
                self.key_reference.remove(evicted)
        else:
            # Already cached: remove so the appendleft below moves it to front.
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print cache contents, most recently used first."""
        for key in self.dq_store:
            print(key)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()
    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
87
1
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
#
# Flax/JAX DDPM scheduler (denoising diffusion probabilistic models).
# NOTE(review): identifiers here are machine-mangled and the module cannot run as
# written — methods declare several parameters all named `lowercase_`
# (SyntaxError), locals are assigned to `lowercase__` but later read under their
# real names (e.g. `alpha_prod_t`, `sample`, `variance`), base classes are the
# undefined `__A`, and `List`/`Dict`/`Any` are referenced without being imported.
# Restore identifiers from the upstream diffusers `scheduling_ddpm_flax` source
# before relying on the behavior described by the surviving comments below.
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax
import jax.numpy as jnp

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
    CommonSchedulerState,
    FlaxKarrasDiffusionSchedulers,
    FlaxSchedulerMixin,
    FlaxSchedulerOutput,
    add_noise_common,
    get_velocity_common,
)


@flax.struct.dataclass
class snake_case_ :
    # NOTE(review): four fields all named `__A` — presumably `common`,
    # `init_noise_sigma`, `timesteps`, `num_inference_steps` before mangling.
    __A : CommonSchedulerState

    # setable values
    __A : jnp.ndarray
    __A : jnp.ndarray
    __A : Optional[int] = None

    @classmethod
    def __UpperCamelCase ( cls : str , lowercase_ : CommonSchedulerState , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray ) -> Optional[int]:
        # Alternate constructor wiring the three state fields.
        return cls(common=lowercase_ , init_noise_sigma=lowercase_ , timesteps=lowercase_ )


@dataclass
class snake_case_ ( __A ):
    # Scheduler step output wrapper carrying the updated state.
    __A : DDPMSchedulerState


class snake_case_ ( __A ,__A ):
    # Compatible Karras-style scheduler names.
    __A : List[Any] = [e.name for e in FlaxKarrasDiffusionSchedulers]

    __A : jnp.dtype

    @property
    def __UpperCamelCase ( self : Union[str, Any] ) -> List[Any]:
        # has_state: this scheduler keeps explicit functional state.
        return True

    @register_to_config
    def __init__( self : Optional[Any] , lowercase_ : int = 10_00 , lowercase_ : float = 0.00_01 , lowercase_ : float = 0.02 , lowercase_ : str = "linear" , lowercase_ : Optional[jnp.ndarray] = None , lowercase_ : str = "fixed_small" , lowercase_ : bool = True , lowercase_ : str = "epsilon" , lowercase_ : jnp.dtype = jnp.floataa , ) -> List[str]:
        # NOTE(review): only the dtype is stashed; remaining args are captured
        # into self.config by @register_to_config (per upstream convention —
        # TODO confirm).
        lowercase__ : Any = dtype

    def __UpperCamelCase ( self : int , lowercase_ : Optional[CommonSchedulerState] = None ) -> DDPMSchedulerState:
        # create_state: build the initial functional scheduler state.
        if common is None:
            lowercase__ : str = CommonSchedulerState.create(self )

        # standard deviation of the initial noise distribution
        lowercase__ : Any = jnp.array(1.0 , dtype=self.dtype )

        # Full training-timestep schedule, descending.
        lowercase__ : Union[str, Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1]

        return DDPMSchedulerState.create(
            common=lowercase_ , init_noise_sigma=lowercase_ , timesteps=lowercase_ , )

    def __UpperCamelCase ( self : Optional[int] , lowercase_ : DDPMSchedulerState , lowercase_ : jnp.ndarray , lowercase_ : Optional[int] = None ) -> jnp.ndarray:
        # scale_model_input: identity for DDPM.
        return sample

    def __UpperCamelCase ( self : int , lowercase_ : DDPMSchedulerState , lowercase_ : int , lowercase_ : Tuple = () ) -> DDPMSchedulerState:
        # set_timesteps: subsample the training schedule for inference.
        lowercase__ : int = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        lowercase__ : List[str] = (jnp.arange(0 , lowercase_ ) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=lowercase_ , timesteps=lowercase_ , )

    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : DDPMSchedulerState , lowercase_ : Optional[Any] , lowercase_ : Any=None , lowercase_ : Tuple=None ) -> Optional[int]:
        # _get_variance: posterior variance at timestep t per the configured
        # variance_type.
        lowercase__ : Dict = state.common.alphas_cumprod[t]
        lowercase__ : List[Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        lowercase__ : List[str] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            lowercase__ : Dict = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            lowercase__ : List[Any] = jnp.clip(lowercase_ , a_min=1E-20 )
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            lowercase__ : Union[str, Any] = jnp.log(jnp.clip(lowercase_ , a_min=1E-20 ) )
        elif variance_type == "fixed_large":
            lowercase__ : Optional[int] = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            lowercase__ : List[str] = jnp.log(state.common.betas[t] )
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            # Interpolate between min_log (posterior) and max_log (beta) using
            # the model-predicted fraction in [0, 1].
            lowercase__ : Tuple = variance
            lowercase__ : Union[str, Any] = state.common.betas[t]
            lowercase__ : int = (predicted_variance + 1) / 2
            lowercase__ : List[str] = frac * max_log + (1 - frac) * min_log

        return variance

    def __UpperCamelCase ( self : Any , lowercase_ : DDPMSchedulerState , lowercase_ : jnp.ndarray , lowercase_ : int , lowercase_ : jnp.ndarray , lowercase_ : Optional[jax.random.KeyArray] = None , lowercase_ : bool = True , ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        # step: one reverse-diffusion update x_t -> x_{t-1}.
        lowercase__ : str = timestep

        if key is None:
            lowercase__ : Any = jax.random.PRNGKey(0 )

        # Split off the predicted variance channel when the model learns it.
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            lowercase__ , lowercase__ : Union[str, Any] = jnp.split(lowercase_ , sample.shape[1] , axis=1 )
        else:
            lowercase__ : Any = None

        # 1. compute alphas, betas
        lowercase__ : str = state.common.alphas_cumprod[t]
        lowercase__ : Optional[Any] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) )
        lowercase__ : Optional[int] = 1 - alpha_prod_t
        lowercase__ : List[str] = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            lowercase__ : Any = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            lowercase__ : Any = model_output
        elif self.config.prediction_type == "v_prediction":
            lowercase__ : List[str] = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                F'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '''
                " for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            lowercase__ : str = jnp.clip(lowercase_ , -1 , 1 )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        lowercase__ : List[str] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        lowercase__ : List[Any] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        lowercase__ : Any = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            # Fresh gaussian noise scaled by the posterior std-dev.
            lowercase__ : Optional[int] = jax.random.split(lowercase_ , num=1 )
            lowercase__ : List[str] = jax.random.normal(lowercase_ , shape=model_output.shape , dtype=self.dtype )
            return (self._get_variance(lowercase_ , lowercase_ , predicted_variance=lowercase_ ) ** 0.5) * noise

        # No noise is added at the final step (t == 0).
        lowercase__ : int = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) )

        lowercase__ : Any = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=lowercase_ , state=lowercase_ )

    def __UpperCamelCase ( self : List[Any] , lowercase_ : DDPMSchedulerState , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray , ) -> jnp.ndarray:
        # add_noise: forward-diffuse clean samples to a given timestep.
        return add_noise_common(state.common , lowercase_ , lowercase_ , lowercase_ )

    def __UpperCamelCase ( self : str , lowercase_ : DDPMSchedulerState , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray , lowercase_ : jnp.ndarray , ) -> jnp.ndarray:
        # get_velocity: v-prediction target for training.
        return get_velocity_common(state.common , lowercase_ , lowercase_ , lowercase_ )

    def __len__( self : Any ) -> Optional[Any]:
        return self.config.num_train_timesteps
87
# ConvBERT model configuration + ONNX export configuration.
# NOTE(review): identifiers are machine-mangled and the module cannot work as
# written — both classes share the name `snake_case_` (the second shadows the
# first), the base class `__A` is undefined, class attribute `__A` looks like a
# mangled `model_type`, and every `lowercase__ : ... = value` line in __init__
# looks like a mangled `self.<attr> = value` (so no config attribute is actually
# set).  Restore from upstream `transformers.models.convbert.configuration_convbert`.
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


UpperCamelCase = logging.get_logger(__name__)

UpperCamelCase = {
    '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
    '''YituTech/conv-bert-medium-small''': (
        '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
    ),
    '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class snake_case_ ( __A ):
    # model_type identifier used by the Auto* registries.
    __A : List[str] = "convbert"

    def __init__( self : Union[str, Any] , lowercase_ : str=3_05_22 , lowercase_ : Any=7_68 , lowercase_ : Tuple=12 , lowercase_ : List[str]=12 , lowercase_ : Optional[int]=30_72 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : str=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : Optional[Any]=5_12 , lowercase_ : Dict=2 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : Optional[Any]=1E-12 , lowercase_ : Optional[int]=1 , lowercase_ : List[Any]=0 , lowercase_ : Optional[int]=2 , lowercase_ : str=7_68 , lowercase_ : Dict=2 , lowercase_ : Optional[Any]=9 , lowercase_ : Union[str, Any]=1 , lowercase_ : Any=None , **lowercase_ : Optional[Any] , ) -> Dict:
        # Forward the special-token ids and remaining kwargs to the base config.
        super().__init__(
            pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ , )

        # NOTE(review): each line below was presumably `self.<name> = <name>`.
        lowercase__ : List[str] = vocab_size
        lowercase__ : Union[str, Any] = hidden_size
        lowercase__ : Any = num_hidden_layers
        lowercase__ : List[str] = num_attention_heads
        lowercase__ : Union[str, Any] = intermediate_size
        lowercase__ : Optional[Any] = hidden_act
        lowercase__ : int = hidden_dropout_prob
        lowercase__ : str = attention_probs_dropout_prob
        lowercase__ : Union[str, Any] = max_position_embeddings
        lowercase__ : Optional[int] = type_vocab_size
        lowercase__ : Tuple = initializer_range
        lowercase__ : List[str] = layer_norm_eps
        lowercase__ : List[Any] = embedding_size
        lowercase__ : Optional[Any] = head_ratio
        lowercase__ : Dict = conv_kernel_size
        lowercase__ : Tuple = num_groups
        lowercase__ : Optional[int] = classifier_dropout


class snake_case_ ( __A ):
    # ONNX export config: declares the dynamic axes of each model input.
    @property
    def __UpperCamelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            lowercase__ : Tuple = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            lowercase__ : str = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
87
1
# Open-Llama model configuration.
# NOTE(review): identifiers are machine-mangled and the module cannot work as
# written — the base class `__A` is undefined, `__A = "open-llama"` looks like a
# mangled `model_type`, every `lowercase__ : ... = value` line looks like a
# mangled `self.<attr> = value` (so no config attribute is actually set), and the
# validation method's `isinstance(..., lowercase_)` calls reference an undefined
# name (presumably `dict` / `(int, float)` upstream).  Restore from the upstream
# transformers `configuration_open_llama` source.
from ...configuration_utils import PretrainedConfig
from ...utils import logging


UpperCamelCase = logging.get_logger(__name__)

UpperCamelCase = {
    '''s-JoL/Open-Llama-V1''': '''https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json''',
}


class snake_case_ ( __A ):
    # model_type identifier used by the Auto* registries.
    __A : Optional[Any] = "open-llama"

    def __init__( self : Tuple , lowercase_ : str=10_00_00 , lowercase_ : Any=40_96 , lowercase_ : Dict=1_10_08 , lowercase_ : Union[str, Any]=32 , lowercase_ : int=32 , lowercase_ : Tuple="silu" , lowercase_ : Any=20_48 , lowercase_ : int=0.02 , lowercase_ : Any=1E-6 , lowercase_ : int=True , lowercase_ : Optional[Any]=0 , lowercase_ : Optional[Any]=1 , lowercase_ : Any=2 , lowercase_ : Any=False , lowercase_ : int=True , lowercase_ : Union[str, Any]=0.1 , lowercase_ : Optional[int]=0.1 , lowercase_ : Optional[int]=True , lowercase_ : Union[str, Any]=True , lowercase_ : Any=None , **lowercase_ : Any , ) -> Union[str, Any]:
        # NOTE(review): each line below was presumably `self.<name> = <name>`.
        lowercase__ : Tuple = vocab_size
        lowercase__ : Optional[Any] = max_position_embeddings
        lowercase__ : Union[str, Any] = hidden_size
        lowercase__ : Any = intermediate_size
        lowercase__ : Union[str, Any] = num_hidden_layers
        lowercase__ : Dict = num_attention_heads
        lowercase__ : List[str] = hidden_act
        lowercase__ : Any = initializer_range
        lowercase__ : List[Any] = rms_norm_eps
        lowercase__ : Optional[int] = use_cache
        # Legacy kwarg spelling ("memorry") kept for checkpoint compatibility.
        lowercase__ : Tuple = kwargs.pop(
            "use_memorry_efficient_attention" , lowercase_ )
        lowercase__ : Optional[Any] = hidden_dropout_prob
        lowercase__ : str = attention_dropout_prob
        lowercase__ : Tuple = use_stable_embedding
        lowercase__ : Dict = shared_input_output_embedding
        lowercase__ : Optional[Any] = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , tie_word_embeddings=lowercase_ , **lowercase_ , )

    def __UpperCamelCase ( self : Dict ) -> int:
        # Validate the optional rope_scaling dict: {"type": linear|dynamic,
        # "factor": float > 1}.  None disables RoPE scaling entirely.
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , lowercase_ ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                F'''got {self.rope_scaling}''' )
        lowercase__ : Any = self.rope_scaling.get("type" , lowercase_ )
        lowercase__ : Tuple = self.rope_scaling.get("factor" , lowercase_ )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(lowercase_ , lowercase_ ) or rope_scaling_factor <= 1.0:
            raise ValueError(F'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
87
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def lowercase_(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str) -> None:
    """Convert a TensorFlow BERT checkpoint to a PyTorch state dict on disk.

    Args:
        tf_checkpoint_path: path to the TensorFlow checkpoint.
        bert_config_file: JSON file describing the model architecture.
        pytorch_dump_path: where to write the converted ``state_dict``.

    The original block declared all three parameters with the same name
    (a SyntaxError) and the CLI entry point called an undefined
    ``convert_tf_checkpoint_to_pytorch``; both are fixed here.
    """
    # Initialise PyTorch model from the JSON architecture description.
    config = BertConfig.from_json_file(bert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    UpperCamelCase = argparse.ArgumentParser()
    # Required parameters
    UpperCamelCase.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    UpperCamelCase.add_argument(
        "--bert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained BERT model. \n"
            "This specifies the model architecture."
        ),
    )
    UpperCamelCase.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = UpperCamelCase.parse_args()
    lowercase_(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
87
1
import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import TimesformerConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, TimesformerForVideoClassification, TimesformerModel, ) from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class snake_case_ : def __init__( self : Optional[int] , lowercase_ : str , lowercase_ : Dict=13 , lowercase_ : Optional[Any]=10 , lowercase_ : int=3 , lowercase_ : str=2 , lowercase_ : Tuple=2 , lowercase_ : int=True , lowercase_ : int=True , lowercase_ : Optional[int]=32 , lowercase_ : List[Any]=5 , lowercase_ : Optional[Any]=4 , lowercase_ : List[Any]=37 , lowercase_ : Tuple="gelu" , lowercase_ : int=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : str=10 , lowercase_ : Optional[int]=0.02 , lowercase_ : List[str]="divided_space_time" , lowercase_ : Union[str, Any]=None , ) -> List[str]: lowercase__ : int = parent lowercase__ : str = batch_size lowercase__ : Optional[int] = image_size lowercase__ : Tuple = num_channels lowercase__ : List[str] = patch_size lowercase__ : Optional[Any] = num_frames lowercase__ : Any = is_training lowercase__ : Union[str, Any] = use_labels lowercase__ : List[str] = hidden_size lowercase__ : Union[str, Any] = num_hidden_layers lowercase__ : List[str] = num_attention_heads lowercase__ : int = intermediate_size lowercase__ : Dict 
= hidden_act lowercase__ : Optional[int] = hidden_dropout_prob lowercase__ : List[str] = attention_probs_dropout_prob lowercase__ : int = attention_type lowercase__ : Optional[int] = initializer_range lowercase__ : Optional[int] = scope lowercase__ : Optional[Any] = num_labels # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token lowercase__ : Optional[int] = (image_size // patch_size) ** 2 lowercase__ : Tuple = (num_frames) * self.num_patches_per_frame + 1 def __UpperCamelCase ( self : List[Any] ) -> Union[str, Any]: lowercase__ : List[str] = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) lowercase__ : int = None if self.use_labels: lowercase__ : Any = ids_tensor([self.batch_size] , self.num_labels ) lowercase__ : Optional[int] = self.get_config() return config, pixel_values, labels def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[Any]: lowercase__ : Dict = TimesformerConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , ) lowercase__ : int = self.num_labels return config def __UpperCamelCase ( self : Tuple , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : List[str] ) -> Optional[int]: lowercase__ : Optional[Any] = TimesformerModel(config=lowercase_ ) model.to(lowercase_ ) model.eval() lowercase__ : Dict = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCamelCase ( self : Tuple , lowercase_ : 
Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : Union[str, Any] ) -> Tuple: lowercase__ : Tuple = TimesformerForVideoClassification(lowercase_ ) model.to(lowercase_ ) model.eval() lowercase__ : Tuple = model(lowercase_ ) # verify the logits shape lowercase__ : Optional[int] = torch.Size((self.batch_size, self.num_labels) ) self.parent.assertEqual(result.logits.shape , lowercase_ ) def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]: lowercase__ : str = self.prepare_config_and_inputs() lowercase__ , lowercase__ , lowercase__ : List[str] = config_and_inputs lowercase__ : Dict = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class snake_case_ ( __A ,__A ,unittest.TestCase ): __A : int = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else () __A : Dict = ( {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification} if is_torch_available() else {} ) __A : str = False __A : Optional[Any] = False __A : Tuple = False __A : Union[str, Any] = False def __UpperCamelCase ( self : Union[str, Any] ) -> Dict: lowercase__ : Optional[Any] = TimesformerModelTester(self ) lowercase__ : Tuple = ConfigTester( self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 ) def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Any=False ) -> Union[str, Any]: lowercase__ : Dict = copy.deepcopy(lowercase_ ) if return_labels: if model_class in get_values(lowercase_ ): lowercase__ : List[str] = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowercase_ ) return inputs_dict def __UpperCamelCase ( self : str ) -> List[Any]: self.config_tester.run_common_tests() @unittest.skip(reason="TimeSformer does not use inputs_embeds" ) def __UpperCamelCase ( self : str ) -> Optional[int]: pass def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]: lowercase__ , lowercase__ : List[Any] = 
self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Dict = model_class(lowercase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) lowercase__ : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) ) def __UpperCamelCase ( self : Optional[int] ) -> List[str]: lowercase__ , lowercase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : Tuple = model_class(lowercase_ ) lowercase__ : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase__ : Tuple = [*signature.parameters.keys()] lowercase__ : Any = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowercase_ ) def __UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]: lowercase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase_ ) def __UpperCamelCase ( self : List[Any] ) -> Dict: lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_video_classification(*lowercase_ ) @slow def __UpperCamelCase ( self : Tuple ) -> Tuple: for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase__ : Any = TimesformerModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) def __UpperCamelCase ( self : List[str] ) -> Tuple: if not self.has_attentions: pass else: lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() lowercase__ : str = True for model_class in self.all_model_classes: lowercase__ : Tuple = self.model_tester.seq_length lowercase__ : Any = self.model_tester.num_frames lowercase__ : Optional[int] = True lowercase__ : List[Any] = False lowercase__ : Tuple = True lowercase__ : Union[str, Any] = model_class(lowercase_ ) model.to(lowercase_ ) 
model.eval() with torch.no_grad(): lowercase__ : str = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase__ : int = outputs.attentions self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] lowercase__ : Dict = True lowercase__ : Union[str, Any] = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase__ : Optional[int] = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase__ : Optional[Any] = outputs.attentions self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) lowercase__ : Optional[Any] = len(lowercase_ ) # Check attention is always last and order is fine lowercase__ : Any = True lowercase__ : List[str] = True lowercase__ : Union[str, Any] = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): lowercase__ : int = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) self.assertEqual(out_len + 1 , len(lowercase_ ) ) lowercase__ : Tuple = outputs.attentions self.assertEqual(len(lowercase_ ) , self.model_tester.num_hidden_layers ) # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , ) def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple: def check_hidden_states_output(lowercase_ : str , lowercase_ : Any , lowercase_ : str ): lowercase__ : str = model_class(lowercase_ ) model.to(lowercase_ ) model.eval() with torch.no_grad(): 
lowercase__ : Dict = model(**self._prepare_for_class(lowercase_ , lowercase_ ) ) lowercase__ : Any = outputs.hidden_states lowercase__ : Any = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(lowercase_ ) , lowercase_ ) lowercase__ : List[Any] = self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) lowercase__ , lowercase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase__ : str = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase__ : Tuple = True check_hidden_states_output(lowercase_ , lowercase_ , lowercase_ ) def lowercase_ ( ): lowercase__ : Optional[Any] = hf_hub_download( repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset") lowercase__ : Optional[Any] = np.load(_lowerCamelCase) return list(_lowerCamelCase) @require_torch @require_vision class snake_case_ ( unittest.TestCase ): @cached_property def __UpperCamelCase ( self : Optional[int] ) -> str: # logits were tested with a different mean and std, so we use the same here return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def __UpperCamelCase ( self : List[str] ) -> Union[str, Any]: lowercase__ : Optional[int] = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to( lowercase_ ) lowercase__ : str = self.default_image_processor lowercase__ : str = prepare_video() lowercase__ : Any = image_processor(video[:8] , return_tensors="pt" ).to(lowercase_ ) # forward pass with torch.no_grad(): lowercase__ : Optional[int] = model(**lowercase_ ) # verify the logits lowercase__ : Optional[Any] = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , 
lowercase_ ) lowercase__ : List[str] = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(lowercase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
87
import asyncio import os import re import sys import tempfile import unittest from contextlib import contextmanager from copy import deepcopy from distutils.util import strtobool from enum import Enum from importlib.util import find_spec from pathlib import Path from unittest.mock import patch import pyarrow as pa import pytest import requests from packaging import version from datasets import config if config.PY_VERSION < version.parse('''3.8'''): import importlib_metadata else: import importlib.metadata as importlib_metadata def lowercase_ ( _lowerCamelCase : Any , _lowerCamelCase : List[str]=False): try: lowercase__ : Union[str, Any] = os.environ[key] except KeyError: # KEY isn't set, default to `default`. lowercase__ : int = default else: # KEY is set, convert it to True or False. try: lowercase__ : Optional[int] = strtobool(_lowerCamelCase) except ValueError: # More values are supported, but let's keep the message simple. raise ValueError(f'''If set, {key} must be yes or no.''') return _value UpperCamelCase = parse_flag_from_env('''RUN_SLOW''', default=False) UpperCamelCase = parse_flag_from_env('''RUN_REMOTE''', default=False) UpperCamelCase = parse_flag_from_env('''RUN_LOCAL''', default=True) UpperCamelCase = parse_flag_from_env('''RUN_PACKAGED''', default=True) # Compression UpperCamelCase = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''') UpperCamelCase = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''') UpperCamelCase = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''') # Audio UpperCamelCase = pytest.mark.skipif( # On Windows and OS X, soundfile installs sndfile find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''), reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''', ) # Beam UpperCamelCase = pytest.mark.skipif( not config.BEAM_AVAILABLE or 
config.DILL_VERSION >= version.parse('''0.3.2'''), reason='''test requires apache-beam and a compatible dill version''', ) # Dill-cloudpickle compatibility UpperCamelCase = pytest.mark.skipif( config.DILL_VERSION <= version.parse('''0.3.2'''), reason='''test requires dill>0.3.2 for cloudpickle compatibility''', ) # Windows UpperCamelCase = pytest.mark.skipif( sys.platform == '''win32''', reason='''test should not be run on Windows''', ) def lowercase_ ( _lowerCamelCase : int): try: import faiss # noqa except ImportError: lowercase__ : Optional[Any] = unittest.skip("test requires faiss")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : int): try: import regex # noqa except ImportError: lowercase__ : List[Any] = unittest.skip("test requires regex")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : int): try: import elasticsearch # noqa except ImportError: lowercase__ : Optional[int] = unittest.skip("test requires elasticsearch")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Union[str, Any]): try: import sqlalchemy # noqa except ImportError: lowercase__ : Optional[int] = unittest.skip("test requires sqlalchemy")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : int): if not config.TORCH_AVAILABLE: lowercase__ : Tuple = unittest.skip("test requires PyTorch")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Tuple): if not config.TF_AVAILABLE: lowercase__ : Any = unittest.skip("test requires TensorFlow")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Dict): if not config.JAX_AVAILABLE: lowercase__ : List[str] = unittest.skip("test requires JAX")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : int): if not config.PIL_AVAILABLE: lowercase__ : Dict = unittest.skip("test requires Pillow")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Tuple): try: import transformers # noqa F401 except ImportError: return 
unittest.skip("test requires transformers")(_lowerCamelCase) else: return test_case def lowercase_ ( _lowerCamelCase : Optional[Any]): try: import tiktoken # noqa F401 except ImportError: return unittest.skip("test requires tiktoken")(_lowerCamelCase) else: return test_case def lowercase_ ( _lowerCamelCase : Dict): try: import spacy # noqa F401 except ImportError: return unittest.skip("test requires spacy")(_lowerCamelCase) else: return test_case def lowercase_ ( _lowerCamelCase : Optional[int]): def _require_spacy_model(_lowerCamelCase : Optional[int]): try: import spacy # noqa F401 spacy.load(_lowerCamelCase) except ImportError: return unittest.skip("test requires spacy")(_lowerCamelCase) except OSError: return unittest.skip("test requires spacy model '{}'".format(_lowerCamelCase))(_lowerCamelCase) else: return test_case return _require_spacy_model def lowercase_ ( _lowerCamelCase : Dict): try: import pyspark # noqa F401 except ImportError: return unittest.skip("test requires pyspark")(_lowerCamelCase) else: return test_case def lowercase_ ( _lowerCamelCase : List[str]): try: import joblibspark # noqa F401 except ImportError: return unittest.skip("test requires joblibspark")(_lowerCamelCase) else: return test_case def lowercase_ ( _lowerCamelCase : Dict): if not _run_slow_tests or _run_slow_tests == 0: lowercase__ : Tuple = unittest.skip("test is slow")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : int): if not _run_local_tests or _run_local_tests == 0: lowercase__ : str = unittest.skip("test is local")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Optional[int]): if not _run_packaged_tests or _run_packaged_tests == 0: lowercase__ : List[Any] = unittest.skip("test is packaged")(_lowerCamelCase) return test_case def lowercase_ ( _lowerCamelCase : Tuple): if not _run_remote_tests or _run_remote_tests == 0: lowercase__ : Union[str, Any] = unittest.skip("test requires remote")(_lowerCamelCase) return test_case def 
lowercase_ ( *_lowerCamelCase : str): def decorate(cls : str): for name, fn in cls.__dict__.items(): if callable(_lowerCamelCase) and name.startswith("test"): for decorator in decorators: lowercase__ : Optional[int] = decorator(_lowerCamelCase) setattr(cls , _lowerCamelCase , _lowerCamelCase) return cls return decorate class snake_case_ ( __A ): pass class snake_case_ ( __A ): __A : List[Any] = 0 __A : str = 1 __A : int = 2 @contextmanager def lowercase_ ( _lowerCamelCase : List[str]=OfflineSimulationMode.CONNECTION_FAILS , _lowerCamelCase : int=1E-16): lowercase__ : int = requests.Session().request def timeout_request(_lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Dict , **_lowerCamelCase : str): # Change the url to an invalid url so that the connection hangs lowercase__ : Any = "https://10.255.255.1" if kwargs.get("timeout") is None: raise RequestWouldHangIndefinitelyError( f'''Tried a call to {url} in offline mode with no timeout set. Please set a timeout.''') lowercase__ : Dict = timeout try: return online_request(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase) except Exception as e: # The following changes in the error are just here to make the offline timeout error prettier lowercase__ : Dict = url lowercase__ : Union[str, Any] = e.args[0] lowercase__ : Optional[Any] = (max_retry_error.args[0].replace("10.255.255.1" , f'''OfflineMock[{url}]'''),) lowercase__ : int = (max_retry_error,) raise def raise_connection_error(_lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any] , **_lowerCamelCase : Tuple): raise requests.ConnectionError("Offline mode is enabled." 
, request=_lowerCamelCase) if mode is OfflineSimulationMode.CONNECTION_FAILS: with patch("requests.Session.send" , _lowerCamelCase): yield elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT: # inspired from https://stackoverflow.com/a/904609 with patch("requests.Session.request" , _lowerCamelCase): yield elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1: with patch("datasets.config.HF_DATASETS_OFFLINE" , _lowerCamelCase): yield else: raise ValueError("Please use a value from the OfflineSimulationMode enum.") @contextmanager def lowercase_ ( *_lowerCamelCase : str , **_lowerCamelCase : Tuple): lowercase__ : Dict = str(Path().resolve()) with tempfile.TemporaryDirectory(*_lowerCamelCase , **_lowerCamelCase) as tmp_dir: try: os.chdir(_lowerCamelCase) yield finally: os.chdir(_lowerCamelCase) @contextmanager def lowercase_ ( ): import gc gc.collect() lowercase__ : Union[str, Any] = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase." @contextmanager def lowercase_ ( ): import gc gc.collect() lowercase__ : int = pa.total_allocated_bytes() yield assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase." 
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[Any]): return deepcopy(_lowerCamelCase).integers(0 , 100 , 10).tolist() == deepcopy(_lowerCamelCase).integers(0 , 100 , 10).tolist() def lowercase_ ( _lowerCamelCase : str): import decorator from requests.exceptions import HTTPError def _wrapper(_lowerCamelCase : str , *_lowerCamelCase : Dict , **_lowerCamelCase : Dict): try: return func(*_lowerCamelCase , **_lowerCamelCase) except HTTPError as err: if str(_lowerCamelCase).startswith("500") or str(_lowerCamelCase).startswith("502"): pytest.xfail(str(_lowerCamelCase)) raise err return decorator.decorator(_wrapper , _lowerCamelCase) class snake_case_ : def __init__( self : int , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : List[str] ) -> List[str]: lowercase__ : Tuple = returncode lowercase__ : int = stdout lowercase__ : Union[str, Any] = stderr async def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : Dict): while True: lowercase__ : Optional[int] = await stream.readline() if line: callback(_lowerCamelCase) else: break async def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Union[str, Any]=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : int=None , _lowerCamelCase : Optional[Any]=False , _lowerCamelCase : Tuple=False): if echo: print("\nRunning: " , " ".join(_lowerCamelCase)) lowercase__ : Optional[int] = await asyncio.create_subprocess_exec( cmd[0] , *cmd[1:] , stdin=_lowerCamelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_lowerCamelCase , ) # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait # # If it starts hanging, will need to switch to the following code. The problem is that no data # will be seen until it's done and if it hangs for example there will be no debug info. 
# out, err = await p.communicate() # return _RunOutput(p.returncode, out, err) lowercase__ : str = [] lowercase__ : List[str] = [] def tee(_lowerCamelCase : str , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[int]=""): lowercase__ : Optional[int] = line.decode("utf-8").rstrip() sink.append(_lowerCamelCase) if not quiet: print(_lowerCamelCase , _lowerCamelCase , file=_lowerCamelCase) # XXX: the timeout doesn't seem to make any difference here await asyncio.wait( [ _read_stream(p.stdout , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stdout , label="stdout:")), _read_stream(p.stderr , lambda _lowerCamelCase: tee(_lowerCamelCase , _lowerCamelCase , sys.stderr , label="stderr:")), ] , timeout=_lowerCamelCase , ) return _RunOutput(await p.wait() , _lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str]=None , _lowerCamelCase : Dict=None , _lowerCamelCase : int=180 , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Optional[Any]=True): lowercase__ : Any = asyncio.get_event_loop() lowercase__ : Tuple = loop.run_until_complete( _stream_subprocess(_lowerCamelCase , env=_lowerCamelCase , stdin=_lowerCamelCase , timeout=_lowerCamelCase , quiet=_lowerCamelCase , echo=_lowerCamelCase)) lowercase__ : int = " ".join(_lowerCamelCase) if result.returncode > 0: lowercase__ : Any = "\n".join(result.stderr) raise RuntimeError( f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n''' f'''The combined stderr from workers follows:\n{stderr}''') # check that the subprocess actually did run and produced some output, should the test rely on # the remote side to do the testing if not result.stdout and not result.stderr: raise RuntimeError(f'''\'{cmd_str}\' produced no output.''') return result def lowercase_ ( ): lowercase__ : List[str] = os.environ.get("PYTEST_XDIST_WORKER" , "gw0") lowercase__ : str = re.sub(R"^gw" , "" , _lowerCamelCase , 
0 , re.M) return int(_lowerCamelCase) def lowercase_ ( ): lowercase__ : Union[str, Any] = 2_9500 lowercase__ : Optional[int] = pytest_xdist_worker_id() return port + uniq_delta
87
1
"""PyTest checks for the ``digital_image_processing`` package.

The previous version of this module defined every check under the single name
``lowercase_``, so each ``def`` shadowed the one before it (only the final
function survived at module level) and pytest could not collect any of them.
Several bodies also referenced names that were never assigned (``grad``/
``theta``, ``sepia``, ``burkes``, ``nn``, ``file_name``) and the import
``from numpy import uinta`` fails outright (NumPy has no ``uinta``; the
intended type is ``uint8``).  The checks are restored under unique ``test_*``
names with real bindings.
"""
import numpy as np
from cva import COLOR_BGR2GRAY, cvtColor, imread
from numpy import array, uint8
from PIL import Image

from digital_image_processing import change_contrast as cc
from digital_image_processing import convert_to_negative as cn
from digital_image_processing import sepia as sp
from digital_image_processing.dithering import burkes as bs
from digital_image_processing.edge_detection import canny
from digital_image_processing.filters import convolve as conv
from digital_image_processing.filters import gaussian_filter as gg
from digital_image_processing.filters import local_binary_pattern as lbp
from digital_image_processing.filters import median_filter as med
from digital_image_processing.filters import sobel_filter as sob
from digital_image_processing.resize import resize as rs

# Shared fixtures: a small colour test image and its grayscale version.
img = imread(r"digital_image_processing/image_data/lena_small.jpg")
gray = cvtColor(img, COLOR_BGR2GRAY)


def test_convert_to_negative():
    negative_img = cn.convert_to_negative(img)
    # assert negative_img array for at least one True
    assert negative_img.any()


def test_change_contrast():
    with Image.open("digital_image_processing/image_data/lena_small.jpg") as img:
        # Work around assertion for response
        assert str(cc.change_contrast(img, 110)).startswith(
            "<PIL.Image.Image image mode=RGB size=100x100 at"
        )


def test_gen_gaussian_kernel():
    resp = canny.gen_gaussian_kernel(9, sigma=1.4)
    # Assert ambiguous array
    assert resp.all()


def test_canny():
    canny_img = imread("digital_image_processing/image_data/lena_small.jpg", 0)
    # assert ambiguous array for all == True
    assert canny_img.all()
    canny_array = canny.canny(canny_img)
    # assert canny array for at least one True
    assert canny_array.any()


def test_gen_gaussian_kernel_filter():
    assert gg.gaussian_filter(gray, 5, sigma=0.9).all()


def test_convolve_filter():
    # laplace diagonals
    laplace = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]])
    res = conv.img_convolve(gray, laplace).astype(uint8)
    assert res.any()


def test_median_filter():
    assert med.median_filter(gray, 3).any()


def test_sobel_filter():
    grad, theta = sob.sobel_filter(gray)
    assert grad.any() and theta.any()


def test_sepia():
    sepia = sp.make_sepia(img, 20)
    assert sepia.all()


def test_burkes(file_path: str = "digital_image_processing/image_data/lena_small.jpg"):
    burkes = bs.Burkes(imread(file_path, 1), 120)
    burkes.process()
    assert burkes.output_img.any()


def test_nearest_neighbour(
    file_path: str = "digital_image_processing/image_data/lena_small.jpg",
):
    nn = rs.NearestNeighbour(imread(file_path, 1), 400, 200)
    nn.process()
    assert nn.output.any()


def test_local_binary_pattern():
    file_name = "digital_image_processing/image_data/lena.jpg"
    # Reading the image and converting it to grayscale.
    image = imread(file_name, 0)

    # Test for get_neighbors_pixel function() return not None
    x_coordinate = 0
    y_coordinate = 0
    center = image[x_coordinate][y_coordinate]
    neighbors_pixels = lbp.get_neighbors_pixel(image, x_coordinate, y_coordinate, center)
    assert neighbors_pixels is not None

    # Test for local_binary_pattern function()
    # Create a numpy array as the same height and width of read image
    lbp_image = np.zeros((image.shape[0], image.shape[1]))

    # Iterating through the image and calculating the local binary pattern value
    # for each pixel.
    for i in range(0, image.shape[0]):
        for j in range(0, image.shape[1]):
            lbp_image[i][j] = lbp.local_binary_value(image, i, j)

    assert lbp_image.any()
87
import argparse
import json
from collections import OrderedDict

import torch
from huggingface_hub import cached_download, hf_hub_url

from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification


def embeddings(idx):
    """Return (HF name, original name) pairs for stage ``idx``'s patch embedding."""
    embed = []
    hf_prefix = f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings"
    orig_prefix = f"stage{idx}.patch_embed"
    embed.append((f"{hf_prefix}.projection.weight", f"{orig_prefix}.proj.weight"))
    embed.append((f"{hf_prefix}.projection.bias", f"{orig_prefix}.proj.bias"))
    embed.append((f"{hf_prefix}.normalization.weight", f"{orig_prefix}.norm.weight"))
    embed.append((f"{hf_prefix}.normalization.bias", f"{orig_prefix}.norm.bias"))
    return embed


def attention(idx, cnt):
    """Return (HF name, original name) pairs for block ``cnt`` of stage ``idx``.

    Order matters only for the debug printout; the mapping itself is a plain
    rename table for the attention projections, MLP and layer norms.
    """
    attention_weights = []
    hf_prefix = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig_prefix = f"stage{idx}.blocks.{cnt}"

    # Convolutional q/k/v projections: one conv weight + five batch-norm stats each.
    for proj, letter in (("query", "q"), ("key", "k"), ("value", "v")):
        conv_hf = (
            f"{hf_prefix}.attention.attention.convolution_projection_{proj}.convolution_projection"
        )
        conv_orig = f"{orig_prefix}.attn.conv_proj_{letter}"
        attention_weights.append((f"{conv_hf}.convolution.weight", f"{conv_orig}.conv.weight"))
        for stat in ("weight", "bias", "running_mean", "running_var", "num_batches_tracked"):
            attention_weights.append(
                (f"{conv_hf}.normalization.{stat}", f"{conv_orig}.bn.{stat}")
            )

    # Linear q/k/v projections.
    for proj, letter in (("query", "q"), ("key", "k"), ("value", "v")):
        for wb in ("weight", "bias"):
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.projection_{proj}.{wb}",
                    f"{orig_prefix}.attn.proj_{letter}.{wb}",
                )
            )

    # Attention output projection, MLP and the two layer norms.
    for hf_tail, orig_tail in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for wb in ("weight", "bias"):
            attention_weights.append(
                (f"{hf_prefix}.{hf_tail}.{wb}", f"{orig_prefix}.{orig_tail}.{wb}")
            )
    return attention_weights


def cls_token(idx):
    """Return the (HF name, original name) pair for the stage's CLS token."""
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token


def final():
    """Return the rename pairs for the final layer norm and classifier head."""
    head = []
    head.append(("layernorm.weight", "norm.weight"))
    head.append(("layernorm.bias", "norm.bias"))
    head.append(("classifier.weight", "head.weight"))
    head.append(("classifier.bias", "head.bias"))
    return head


def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder):
    """Convert an original CvT checkpoint to the HF Transformers format.

    :param cvt_model: model name, e.g. ``cvt-w24`` (its depth tag picks the config)
    :param image_size: input image size stored in the image processor
    :param cvt_file_name: path to the original ``.pth`` checkpoint
    :param pytorch_dump_folder: output directory for model + image processor
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    id2label = json.load(
        open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r")
    )
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size

    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()

    # Debug printout of the full rename table.
    for pair in list_of_state_dict:
        print(pair)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder)
    image_processor.save_pretrained(pytorch_dump_folder)


# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
87
1
def lowercase_ ( _lowerCamelCase : int): if isinstance(_lowerCamelCase , _lowerCamelCase): raise TypeError("'float' object cannot be interpreted as an integer") if isinstance(_lowerCamelCase , _lowerCamelCase): raise TypeError("'str' object cannot be interpreted as an integer") if num == 0: return "0b0" lowercase__ : Optional[Any] = False if num < 0: lowercase__ : Optional[int] = True lowercase__ : List[Any] = -num lowercase__ : list[int] = [] while num > 0: binary.insert(0 , num % 2) num >>= 1 if negative: return "-0b" + "".join(str(_lowerCamelCase) for e in binary) return "0b" + "".join(str(_lowerCamelCase) for e in binary) if __name__ == "__main__": import doctest doctest.testmod()
87
# Lazy-import module definition for the ELECTRA model family: the public names
# are declared in ``_import_structure`` and only materialised on first access.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # Install the lazy module so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
1
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available


if is_vision_available():
    from PIL import Image

    from transformers import AutoProcessor, BertTokenizer, BlipImageProcessor, BlipProcessor, PreTrainedTokenizerFast


@require_vision
class BlipProcessorTest(unittest.TestCase):
    """Tests that BlipProcessor correctly composes its tokenizer and image processor."""

    def setUp(self):
        # Save a tiny processor to a temp dir shared by every test.
        self.tmpdirname = tempfile.mkdtemp()

        image_processor = BlipImageProcessor()
        tokenizer = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-BertModel")

        processor = BlipProcessor(tokenizer, image_processor)
        processor.save_pretrained(self.tmpdirname)

    def get_tokenizer(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).tokenizer

    def get_image_processor(self, **kwargs):
        return AutoProcessor.from_pretrained(self.tmpdirname, **kwargs).image_processor

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (moved to channels-last)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_additional_features(self):
        processor = BlipProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = BlipProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, PreTrainedTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, BlipImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"

        encoded_processor = processor(text=input_str)

        encoded_tok = tokenizer(input_str, return_token_type_ids=False)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()

        processor = BlipProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()

        inputs = processor(text=input_str, images=image_input)

        # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
        self.assertListEqual(list(inputs.keys()), ["pixel_values", "input_ids", "attention_mask"])
87
import json
import os
import unittest

from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for LED (slow and fast implementations).

    NOTE(review): the original class/method names were obfuscated; the
    ``tokenizer_class``/``rust_tokenizer_class``/``test_rust_tokenizer`` names
    below are required by TokenizerTesterMixin, and ``default_tokenizer``/
    ``default_tokenizer_fast`` are required by the test bodies themselves.
    """

    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        # A toy BPE vocab/merges pair written to the mixin's temp dir.
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            # tokenizer.pad marks padded global-attention positions with -1.
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            # padding=False so that `pad` below has real padding work to do.
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
87
1
from typing import List

import jiwer
import jiwer.transforms as tr
from packaging import version

import datasets
from datasets.config import PY_VERSION


if PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


# No delimiter: sentences are concatenated directly before character comparison.
SENTENCE_DELIMITER = ""


if version.parse(importlib_metadata.version("jiwer")) < version.parse("2.3.0"):

    class SentencesToListOfCharacters(tr.AbstractTransform):
        """Flatten sentence(s) into a list of characters (pre-2.3.0 jiwer shim)."""

        def __init__(self, sentence_delimiter: str = " "):
            self.sentence_delimiter = sentence_delimiter

        def process_string(self, s: str):
            return list(s)

        def process_list(self, inp: List[str]):
            chars = []
            for sent_idx, sentence in enumerate(inp):
                chars.extend(self.process_string(sentence))
                # Insert the delimiter between sentences, never after the last one.
                if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(inp) - 1:
                    chars.append(self.sentence_delimiter)
            return chars

    cer_transform = tr.Compose(
        [tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
    )
else:
    cer_transform = tr.Compose(
        [
            tr.RemoveMultipleSpaces(),
            tr.Strip(),
            tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
            tr.ReduceToListOfListOfChars(),
        ]
    )


_CITATION = """\
@inproceedings{inproceedings,
    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
    year = {2004},
    month = {01},
    pages = {},
    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""

_DESCRIPTION = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.

CER is similar to Word Error Rate (WER), but operates on character instead of word. Please refer to docs of WER for further information.

Character error rate can be computed as:

CER = (S + D + I) / N = (S + D + I) / (S + D + C)

where

S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).

CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated to the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system with a CER of 0 being a perfect score.
"""

_KWARGS_DESCRIPTION = """
Computes CER score of transcribed segments against references.
Args:
    references: list of references for each speech input.
    predictions: list of transcribtions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation, set to True for more accurate result.
Returns:
    (float): the character error rate

Examples:

    >>> predictions = ["this is the prediction", "there is an other sample"]
    >>> references = ["this is the reference", "there is another one"]
    >>> cer = datasets.load_metric("cer")
    >>> cer_score = cer.compute(predictions=predictions, references=references)
    >>> print(cer_score)
    0.34146341463414637
"""


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class CER(datasets.Metric):
    """Character Error Rate metric built on top of jiwer."""

    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/jitsi/jiwer/"],
            reference_urls=[
                "https://en.wikipedia.org/wiki/Word_error_rate",
                "https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates",
            ],
        )

    def _compute(self, predictions, references, concatenate_texts=False):
        if concatenate_texts:
            # One global alignment over all texts at once; jiwer reports it as "wer",
            # but with cer_transform the units are characters.
            return jiwer.compute_measures(
                references,
                predictions,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )["wer"]

        # Otherwise accumulate error counts pair by pair.
        incorrect = 0
        total = 0
        for prediction, reference in zip(predictions, references):
            measures = jiwer.compute_measures(
                reference,
                prediction,
                truth_transform=cer_transform,
                hypothesis_transform=cer_transform,
            )
            incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
            total += measures["substitutions"] + measures["deletions"] + measures["hits"]

        return incorrect / total
87
import math from typing import Any, Callable, List, Optional, Tuple, Union import numpy as np import torch from ...models import TaFilmDecoder from ...schedulers import DDPMScheduler from ...utils import is_onnx_available, logging, randn_tensor if is_onnx_available(): from ..onnx_utils import OnnxRuntimeModel from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline from .continous_encoder import SpectrogramContEncoder from .notes_encoder import SpectrogramNotesEncoder UpperCamelCase = logging.get_logger(__name__) # pylint: disable=invalid-name UpperCamelCase = 256 class snake_case_ ( __A ): __A : str = ["melgan"] def __init__( self : str , lowercase_ : SpectrogramNotesEncoder , lowercase_ : SpectrogramContEncoder , lowercase_ : TaFilmDecoder , lowercase_ : DDPMScheduler , lowercase_ : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None: super().__init__() # From MELGAN lowercase__ : List[Any] = math.log(1E-5 ) # Matches MelGAN training. lowercase__ : str = 4.0 # Largest value for most examples lowercase__ : Any = 1_28 self.register_modules( notes_encoder=lowercase_ , continuous_encoder=lowercase_ , decoder=lowercase_ , scheduler=lowercase_ , melgan=lowercase_ , ) def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : Dict=False ) -> Optional[Any]: lowercase__ , lowercase__ : int = output_range if clip: lowercase__ : Optional[Any] = torch.clip(lowercase_ , self.min_value , self.max_value ) # Scale to [0, 1]. lowercase__ : List[str] = (features - self.min_value) / (self.max_value - self.min_value) # Scale to [min_out, max_out]. 
return zero_one * (max_out - min_out) + min_out def __UpperCamelCase ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : List[Any]=False ) -> Union[str, Any]: lowercase__ , lowercase__ : Tuple = input_range lowercase__ : Optional[Any] = torch.clip(lowercase_ , lowercase_ , lowercase_ ) if clip else outputs # Scale to [0, 1]. lowercase__ : Union[str, Any] = (outputs - min_out) / (max_out - min_out) # Scale to [self.min_value, self.max_value]. return zero_one * (self.max_value - self.min_value) + self.min_value def __UpperCamelCase ( self : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> List[str]: lowercase__ : Optional[Any] = input_tokens > 0 lowercase__ , lowercase__ : int = self.notes_encoder( encoder_input_tokens=lowercase_ , encoder_inputs_mask=lowercase_ ) lowercase__ , lowercase__ : List[Any] = self.continuous_encoder( encoder_inputs=lowercase_ , encoder_inputs_mask=lowercase_ ) return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)] def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str ) -> Tuple: lowercase__ : Union[str, Any] = noise_time if not torch.is_tensor(lowercase_ ): lowercase__ : Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device ) elif torch.is_tensor(lowercase_ ) and len(timesteps.shape ) == 0: lowercase__ : Optional[Any] = timesteps[None].to(input_tokens.device ) # broadcast to batch dimension in a way that's compatible with ONNX/Core ML lowercase__ : int = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device ) lowercase__ : str = self.decoder( encodings_and_masks=lowercase_ , decoder_input_tokens=lowercase_ , decoder_noise_time=lowercase_ ) return logits @torch.no_grad() def __call__( self : List[str] , lowercase_ : List[List[int]] , lowercase_ : Optional[torch.Generator] = None , lowercase_ : 
int = 1_00 , lowercase_ : bool = True , lowercase_ : str = "numpy" , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]: if (callback_steps is None) or ( callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0) ): raise ValueError( F'''`callback_steps` has to be a positive integer but is {callback_steps} of type''' F''' {type(lowercase_ )}.''' ) lowercase__ : str = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa ) lowercase__ : Optional[int] = np.zeros([1, 0, self.n_dims] , np.floataa ) lowercase__ : str = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device ) for i, encoder_input_tokens in enumerate(lowercase_ ): if i == 0: lowercase__ : Union[str, Any] = torch.from_numpy(pred_mel[:1].copy() ).to( device=self.device , dtype=self.decoder.dtype ) # The first chunk has no previous context. lowercase__ : List[str] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device ) else: # The full song pipeline does not feed in a context feature, so the mask # will be all 0s after the feature converter. Because we know we're # feeding in a full context chunk from the previous prediction, set it # to all 1s. 
lowercase__ : str = ones lowercase__ : str = self.scale_features( lowercase_ , output_range=[-1.0, 1.0] , clip=lowercase_ ) lowercase__ : str = self.encode( input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=lowercase_ , continuous_mask=lowercase_ , ) # Sample encoder_continuous_inputs shaped gaussian noise to begin loop lowercase__ : List[str] = randn_tensor( shape=encoder_continuous_inputs.shape , generator=lowercase_ , device=self.device , dtype=self.decoder.dtype , ) # set step values self.scheduler.set_timesteps(lowercase_ ) # Denoising diffusion loop for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ): lowercase__ : Optional[int] = self.decode( encodings_and_masks=lowercase_ , input_tokens=lowercase_ , noise_time=t / self.scheduler.config.num_train_timesteps , ) # Compute previous output: x_t -> x_t-1 lowercase__ : Optional[Any] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample lowercase__ : Tuple = self.scale_to_features(lowercase_ , input_range=[-1.0, 1.0] ) lowercase__ : List[str] = mel[:1] lowercase__ : Optional[int] = mel.cpu().float().numpy() lowercase__ : str = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 ) # call the callback, if provided if callback is not None and i % callback_steps == 0: callback(lowercase_ , lowercase_ ) logger.info("Generated segment" , lowercase_ ) if output_type == "numpy" and not is_onnx_available(): raise ValueError( "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." ) elif output_type == "numpy" and self.melgan is None: raise ValueError( "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." 
) if output_type == "numpy": lowercase__ : Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) ) else: lowercase__ : Dict = full_pred_mel if not return_dict: return (output,) return AudioPipelineOutput(audios=lowercase_ )
87
1
# Lazy-import module definition for the DPT model family.
#
# BUG FIX: the import map was assigned to a throwaway name (each assignment
# overwriting the previous one) while `_LazyModule` was handed the undefined
# `_import_structure`. Rebuild the map under the name the lazy loader uses and
# install the proxy module, matching the standard transformers pattern.
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable

# Configuration objects are importable unconditionally.
_import_structure = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Image-processing components require the vision extras.
    _import_structure["feature_extraction_dpt"] = ["DPTFeatureExtractor"]
    _import_structure["image_processing_dpt"] = ["DPTImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes require torch.
    _import_structure["modeling_dpt"] = [
        "DPT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DPTForDepthEstimation",
        "DPTForSemanticSegmentation",
        "DPTModel",
        "DPTPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_dpt import DPTFeatureExtractor
        from .image_processing_dpt import DPTImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_dpt import (
            DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DPTForDepthEstimation,
            DPTForSemanticSegmentation,
            DPTModel,
            DPTPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies (torch,
    # vision) are only imported when one of the exported names is accessed.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
# Pipeline smoke tests for zero-shot audio classification (CLAP-style models).
#
# NOTE(review): identifiers in this snippet were machine-mangled — assignments
# target throwaway `lowercase__` names while later statements read `dataset`,
# `audio_classifier`, `audio` and `lowercase_`, and every method is named
# `__UpperCamelCase` (so later defs shadow earlier ones at class-creation
# time). As written the tests cannot run; confirm against the upstream
# transformers test file before relying on them. Code below is byte-identical
# to the original apart from comments and the removal of annotations that
# referenced undefined names (`Optional`, `List`, `Union`, `Dict`).
import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class snake_case_(unittest.TestCase):
    @require_torch
    def __UpperCamelCase(self):
        # Tiny test model: scores are near-uniform (~0.5 each); this only
        # checks the pipeline runs end to end and emits one (score, label)
        # pair per candidate label.
        lowercase__ = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        lowercase__ = load_dataset("ashraq/esc50")
        # Last clip of the ESC-50 train split as a raw waveform array.
        lowercase__ = dataset["train"]["audio"][-1]["array"]
        lowercase__ = audio_classifier(lowercase_, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(lowercase_),
            [{"score": 0.5_01, "label": "Sound of a dog"}, {"score": 0.4_99, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def __UpperCamelCase(self):
        pass

    @slow
    @require_torch
    def __UpperCamelCase(self):
        # Full-size checkpoint: the dog clip must be classified confidently.
        lowercase__ = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        lowercase__ = load_dataset("ashraq/esc50")
        lowercase__ = dataset["train"]["audio"][-1]["array"]
        lowercase__ = audio_classifier(lowercase_, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(lowercase_),
            [
                {"score": 0.9_99, "label": "Sound of a dog"},
                {"score": 0.0_01, "label": "Sound of vaccum cleaner"},
            ],
        )
        # Same input repeated: each element of the batch gets its own ranking.
        lowercase__ = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(lowercase_),
            [
                [
                    {"score": 0.9_99, "label": "Sound of a dog"},
                    {"score": 0.0_01, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )
        # Explicit batch_size must not change the per-item results.
        lowercase__ = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(lowercase_),
            [
                [
                    {"score": 0.9_99, "label": "Sound of a dog"},
                    {"score": 0.0_01, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def __UpperCamelCase(self):
        pass
87
1
# Lazy-import module definition for SwiftFormer.
#
# BUG FIX: the mangled original assigned the import map to a throwaway
# `UpperCamelCase` name (the second assignment overwriting the first) while
# `_LazyModule` received the undefined `_import_structure`. Rebuild the map
# under the expected name and install the lazy proxy module.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Configuration objects are importable unconditionally.
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Modeling classes require torch.
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Eager imports for static type checkers only.
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )
else:
    import sys

    # Replace this module with a lazy proxy so torch loads only on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
import operator def lowercase_ ( _lowerCamelCase : list , _lowerCamelCase : bool = False , _lowerCamelCase : list | None = None): lowercase__ : int = operator.lt if reverse else operator.gt lowercase__ : str = solution or [] if not arr: return solution lowercase__ : List[str] = [arr.pop(0)] for i, item in enumerate(_lowerCamelCase): if _operator(_lowerCamelCase , sublist[-1]): sublist.append(_lowerCamelCase) arr.pop(_lowerCamelCase) # merging sublist into solution list if not solution: solution.extend(_lowerCamelCase) else: while sublist: lowercase__ : str = sublist.pop(0) for i, xx in enumerate(_lowerCamelCase): if not _operator(_lowerCamelCase , _lowerCamelCase): solution.insert(_lowerCamelCase , _lowerCamelCase) break else: solution.append(_lowerCamelCase) strand_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
87
1
import argparse import os import transformers from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS from .utils import logging logging.set_verbosity_info() UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = {name: getattr(transformers, name + '''Fast''') for name in SLOW_TO_FAST_CONVERTERS} def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : Any , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int): if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES: raise ValueError(f'''Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.''') if tokenizer_name is None: lowercase__ : Optional[Any] = TOKENIZER_CLASSES else: lowercase__ : int = {tokenizer_name: getattr(_lowerCamelCase , tokenizer_name + "Fast")} logger.info(f'''Loading tokenizer classes: {tokenizer_names}''') for tokenizer_name in tokenizer_names: lowercase__ : Union[str, Any] = TOKENIZER_CLASSES[tokenizer_name] lowercase__ : Optional[Any] = True if checkpoint_name is None: lowercase__ : str = list(tokenizer_class.max_model_input_sizes.keys()) else: lowercase__ : Optional[Any] = [checkpoint_name] logger.info(f'''For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}''') for checkpoint in checkpoint_names: logger.info(f'''Loading {tokenizer_class.__class__.__name__} {checkpoint}''') # Load tokenizer lowercase__ : Optional[Any] = tokenizer_class.from_pretrained(_lowerCamelCase , force_download=_lowerCamelCase) # Save fast tokenizer logger.info(f'''Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}''') # For organization names we create sub-directories if "/" in checkpoint: lowercase__ , lowercase__ : Union[str, Any] = checkpoint.split("/") lowercase__ : Any = os.path.join(_lowerCamelCase , _lowerCamelCase) elif add_prefix: lowercase__ : List[str] = checkpoint lowercase__ : List[str] = dump_path else: lowercase__ : Any = None lowercase__ : str = dump_path 
logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''') if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]: lowercase__ : int = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint] lowercase__ : int = file_path.split(_lowerCamelCase)[-1][0] if next_char == "/": lowercase__ : Union[str, Any] = os.path.join(_lowerCamelCase , _lowerCamelCase) lowercase__ : Dict = None logger.info(f'''=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}''') lowercase__ : List[Any] = tokenizer.save_pretrained( _lowerCamelCase , legacy_format=_lowerCamelCase , filename_prefix=_lowerCamelCase) logger.info(f'''=> File names {file_names}''') for file_name in file_names: if not file_name.endswith("tokenizer.json"): os.remove(_lowerCamelCase) logger.info(f'''=> removing {file_name}''') if __name__ == "__main__": UpperCamelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--dump_path''', default=None, type=str, required=True, help='''Path to output generated fast tokenizer files.''' ) parser.add_argument( '''--tokenizer_name''', default=None, type=str, help=( f"Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will " '''download and convert all the checkpoints from AWS.''' ), ) parser.add_argument( '''--checkpoint_name''', default=None, type=str, help='''Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.''', ) parser.add_argument( '''--force_download''', action='''store_true''', help='''Re-download checkpoints.''', ) UpperCamelCase = parser.parse_args() convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
87
import time import warnings from abc import ABC from copy import deepcopy from typing import Optional import torch from ..utils import add_start_docstrings, logging UpperCamelCase = logging.get_logger(__name__) UpperCamelCase = R''' Args: input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and [`PreTrainedTokenizer.__call__`] for details. [What are input IDs?](../glossary#input-ids) scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs (`Dict[str, Any]`, *optional*): Additional stopping criteria specific kwargs. Return: `bool`. `False` indicates we should continue, `True` indicates we should stop. ''' class snake_case_ ( __A ): @add_start_docstrings(lowercase_ ) def __call__( self : Optional[Any] , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : List[str] ) -> bool: raise NotImplementedError("StoppingCriteria needs to be subclassed" ) class snake_case_ ( __A ): def __init__( self : Dict , lowercase_ : int , lowercase_ : Optional[int] = None ) -> List[str]: lowercase__ : str = max_length lowercase__ : Optional[int] = max_position_embeddings @add_start_docstrings(lowercase_ ) def __call__( self : Tuple , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Union[str, Any] ) -> bool: lowercase__ : str = input_ids.shape[-1] lowercase__ : Any = cur_len >= self.max_length if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings: logger.warning_once( "This is a friendly reminder - the current text generation call will exceed the model's predefined " F'''maximum length ({self.max_position_embeddings}). 
Depending on the model, you may observe ''' "exceptions, performance degradation, or nothing at all." ) return is_done class snake_case_ ( __A ): def __init__( self : Tuple , lowercase_ : int , lowercase_ : int ) -> List[str]: warnings.warn( "The class `MaxNewTokensCriteria` is deprecated. " F'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` ''' "with `max_length = start_length + max_new_tokens` instead." , lowercase_ , ) lowercase__ : Optional[int] = start_length lowercase__ : str = max_new_tokens lowercase__ : Tuple = start_length + max_new_tokens @add_start_docstrings(lowercase_ ) def __call__( self : List[Any] , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Dict ) -> bool: return input_ids.shape[-1] >= self.max_length class snake_case_ ( __A ): def __init__( self : Tuple , lowercase_ : float , lowercase_ : Optional[float] = None ) -> Dict: lowercase__ : List[str] = max_time lowercase__ : Tuple = time.time() if initial_timestamp is None else initial_timestamp @add_start_docstrings(lowercase_ ) def __call__( self : int , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : Union[str, Any] ) -> bool: return time.time() - self.initial_timestamp > self.max_time class snake_case_ ( __A ): @add_start_docstrings(lowercase_ ) def __call__( self : str , lowercase_ : torch.LongTensor , lowercase_ : torch.FloatTensor , **lowercase_ : List[str] ) -> bool: return any(criteria(lowercase_ , lowercase_ ) for criteria in self ) @property def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]: for stopping_criterium in self: if isinstance(lowercase_ , lowercase_ ): return stopping_criterium.max_length elif isinstance(lowercase_ , lowercase_ ): return stopping_criterium.max_length return None def lowercase_ ( _lowerCamelCase : StoppingCriteriaList , _lowerCamelCase : int): lowercase__ : Optional[int] = stopping_criteria.max_length lowercase__ : str = deepcopy(_lowerCamelCase) if 
stopping_max_length is not None and stopping_max_length != max_length: warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter" , _lowerCamelCase) elif stopping_max_length is None: new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCamelCase)) return new_stopping_criteria
87
1
# coding=utf-8 # Copyright 2023 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # this script dumps information about the environment import os import platform import sys UpperCamelCase = '''3''' print('''Python version:''', sys.version) print('''OS platform:''', platform.platform()) print('''OS architecture:''', platform.machine()) try: import torch print('''Torch version:''', torch.__version__) print('''Cuda available:''', torch.cuda.is_available()) print('''Cuda version:''', torch.version.cuda) print('''CuDNN version:''', torch.backends.cudnn.version()) print('''Number of GPUs available:''', torch.cuda.device_count()) except ImportError: print('''Torch version:''', None) try: import transformers print('''transformers version:''', transformers.__version__) except ImportError: print('''transformers version:''', None)
87
from typing import Dict import numpy as np import torch from . import residue_constants as rc from .tensor_utils import tensor_tree_map, tree_map def lowercase_ ( _lowerCamelCase : Dict[str, torch.Tensor]): lowercase__ : Any = [] lowercase__ : Optional[int] = [] lowercase__ : Tuple = [] for rt in rc.restypes: lowercase__ : Dict = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]] restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names]) lowercase__ : str = {name: i for i, name in enumerate(_lowerCamelCase)} restype_atomaa_to_atomaa_list.append( [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types]) restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names]) # Add dummy mapping for restype 'UNK' restype_atomaa_to_atomaa_list.append([0] * 14) restype_atomaa_to_atomaa_list.append([0] * 37) restype_atomaa_mask_list.append([0.0] * 14) lowercase__ : Union[str, Any] = torch.tensor( _lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , ) lowercase__ : str = torch.tensor( _lowerCamelCase , dtype=torch.intaa , device=protein["aatype"].device , ) lowercase__ : List[str] = torch.tensor( _lowerCamelCase , dtype=torch.floataa , device=protein["aatype"].device , ) lowercase__ : str = protein["aatype"].to(torch.long) # create the mapping for (residx, atom14) --> atom37, i.e. 
an array # with shape (num_res, 14) containing the atom37 indices for this protein lowercase__ : Dict = restype_atomaa_to_atomaa[protein_aatype] lowercase__ : str = restype_atomaa_mask[protein_aatype] lowercase__ : List[Any] = residx_atomaa_mask lowercase__ : Optional[Any] = residx_atomaa_to_atomaa.long() # create the gather indices for mapping back lowercase__ : str = restype_atomaa_to_atomaa[protein_aatype] lowercase__ : str = residx_atomaa_to_atomaa.long() # create the corresponding mask lowercase__ : Optional[Any] = torch.zeros([21, 37] , dtype=torch.floataa , device=protein["aatype"].device) for restype, restype_letter in enumerate(rc.restypes): lowercase__ : Tuple = rc.restype_atoa[restype_letter] lowercase__ : List[Any] = rc.residue_atoms[restype_name] for atom_name in atom_names: lowercase__ : Optional[int] = rc.atom_order[atom_name] lowercase__ : Tuple = 1 lowercase__ : Dict = restype_atomaa_mask[protein_aatype] lowercase__ : Any = residx_atomaa_mask return protein def lowercase_ ( _lowerCamelCase : Dict[str, torch.Tensor]): lowercase__ : Tuple = tree_map(lambda _lowerCamelCase: torch.tensor(_lowerCamelCase , device=batch["aatype"].device) , _lowerCamelCase , np.ndarray) lowercase__ : List[str] = tensor_tree_map(lambda _lowerCamelCase: np.array(_lowerCamelCase) , make_atomaa_masks(_lowerCamelCase)) return out
87
1
from typing import Any


class Node:
    """Singly linked list node: one payload plus a link to its successor."""

    def __init__(self, data: Any) -> None:
        # BUG FIX: the mangled original assigned to throwaway locals, so the
        # node attributes were never set; restore the intended assignments.
        self.data = data  # stored payload
        self.next = None  # successor node; None at the tail


class snake_case_:
    """Minimal singly linked list supporting push-to-front and value swap.

    BUG FIX: in the mangled original both classes shared the name
    ``snake_case_`` (so ``Node`` was undefined inside ``push``), every
    attribute assignment targeted a throwaway local (``head`` was never set),
    and the demo referenced the undefined names ``LinkedList``/``ll``. The
    intended behavior is restored and ``LinkedList`` is exposed as an alias.
    """

    def __init__(self) -> None:
        self.head = None  # front of the list; None when empty

    def print_list(self) -> None:
        """Print the payloads on one line, front to back."""
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any) -> None:
        """Insert ``new_data`` at the front of the list in O(1)."""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_a: Any, node_data_b: Any) -> None:
        """Swap the payloads of the first nodes holding the two given values.

        No-op when the two values are equal or either value is absent from
        the list; only the ``data`` fields are exchanged — node links are
        left untouched.
        """
        if node_data_a == node_data_b:
            return
        node_a = self.head
        while node_a is not None and node_a.data != node_data_a:
            node_a = node_a.next
        node_b = self.head
        while node_b is not None and node_b.data != node_data_b:
            node_b = node_b.next
        if node_a is None or node_b is None:
            return
        node_a.data, node_b.data = node_b.data, node_a.data


# Name used by the demo below (and presumably by external callers).
LinkedList = snake_case_

if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print("After swapping")
    ll.print_list()
87
# Tests for the Flax BigBird models.  Reconstructed from an obfuscated dump:
# the tester class was referenced as `FlaxBigBirdModelTester` but declared as
# `snake_case_`, the mixin base was the undefined `__A`, every method shared
# one name, and parameter names were duplicated (a SyntaxError).
import unittest

from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask


if is_flax_available():
    import jax

    from transformers.models.big_bird.modeling_flax_big_bird import (
        FlaxBigBirdForCausalLM,
        FlaxBigBirdForMaskedLM,
        FlaxBigBirdForMultipleChoice,
        FlaxBigBirdForPreTraining,
        FlaxBigBirdForQuestionAnswering,
        FlaxBigBirdForSequenceClassification,
        FlaxBigBirdForTokenClassification,
        FlaxBigBirdModel,
    )


class FlaxBigBirdModelTester(unittest.TestCase):
    """Builds tiny BigBird configs and dummy inputs for the common Flax tests."""

    def __init__(
        self,
        parent,
        batch_size=2,
        seq_length=56,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=2,
        intermediate_size=7,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=2,
        num_random_blocks=3,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask) for a tiny model."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        # NOTE(review): the obfuscated source passed an opaque value for
        # `is_decoder`; False matches the upstream test — confirm.
        config = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Repackage the inputs as the dict expected by the common test mixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_flax
class FlaxBigBirdModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )

    # NOTE(review): the obfuscated source only shows two anonymous False
    # class attributes; these names follow the upstream test — confirm.
    test_attn_probs = False
    test_mismatched_shapes = False

    def setUp(self):
        self.model_tester = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_save_pretrained(self):
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_from_pretrained_with_no_automatic_init(self):
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_no_automatic_init(self):
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_hidden_states_output(self):
        super().test_hidden_states_output()

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(model)

    def test_attention_outputs(self):
        # Block-sparse attention does not expose attention probabilities.
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(input_ids, attention_mask=None, **kwargs):
                    return model(input_ids=input_ids, attention_mask=attention_mask, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)
87
1
# Tests for the LDM text-to-image diffusion pipeline.  Reconstructed from an
# obfuscated dump in which every method was named `__UpperCamelCase` (so later
# definitions silently clobbered earlier ones) and class names were lost.
import gc
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNet2DConditionModel
from diffusers.utils.testing_utils import (
    enable_full_determinism,
    load_numpy,
    nightly,
    require_torch_gpu,
    slow,
    torch_device,
)

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin


enable_full_determinism()


class LDMTextToImagePipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = LDMTextToImagePipeline
    params = TEXT_TO_IMAGE_PARAMS - {
        "negative_prompt",
        "negative_prompt_embeds",
        "cross_attention_kwargs",
        "prompt_embeds",
    }
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # NOTE(review): the obfuscated source shows one anonymous False class
    # attribute here; this name follows the upstream test — confirm.
    test_cpu_offload = False

    def get_dummy_components(self):
        """Build the tiny randomly initialized model zoo the fast tests run on."""
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64),
            in_channels=3,
            out_channels=3,
            down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D"),
            up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D"),
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vqvae": vae,
            "bert": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs; MPS needs a CPU generator."""
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_inference_text2img(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator

        components = self.get_dummy_components()
        pipe = LDMTextToImagePipeline(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 16, 16, 3)
        expected_slice = np.array([0.6101, 0.6156, 0.5622, 0.4895, 0.6661, 0.3804, 0.5748, 0.6136, 0.5014])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3


@slow
@require_torch_gpu
class LDMTextToImagePipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        # Fixed latents so the output is reproducible across runs.
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.51825, 0.52850, 0.52543, 0.54258, 0.52304, 0.52569, 0.54363, 0.55276, 0.56878])
        max_diff = np.abs(expected_slice - image_slice).max()
        assert max_diff < 1e-3


@nightly
@require_torch_gpu
class LDMTextToImagePipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs

    def test_ldm_default_ddim(self):
        pipe = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256").to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy"
        )
        max_diff = np.abs(expected_image - image).max()
        assert max_diff < 1e-3
87
# Lazy-import scaffold for the GroupViT model family.  Reconstructed from an
# obfuscated dump: the import-structure dict was repeatedly overwritten
# (instead of keyed by submodule) and `_import_structure` / the lazy module
# binding were lost, so the module could never resolve its symbols.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available


_import_structure = {
    "configuration_groupvit": [
        "GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "GroupViTConfig",
        "GroupViTOnnxConfig",
        "GroupViTTextConfig",
        "GroupViTVisionConfig",
    ],
}

# PyTorch implementation is only registered when torch is importable.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_groupvit"] = [
        "GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GroupViTModel",
        "GroupViTPreTrainedModel",
        "GroupViTTextModel",
        "GroupViTVisionModel",
    ]

# TensorFlow implementation is only registered when TF is importable.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_groupvit"] = [
        "TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFGroupViTModel",
        "TFGroupViTPreTrainedModel",
        "TFGroupViTTextModel",
        "TFGroupViTVisionModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports; at runtime the lazy module
    # below performs them on first attribute access instead.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
1
# Tests for TvltProcessor.  Reconstructed from an obfuscated dump in which
# every method was named `__UpperCamelCase`, so only the last definition in
# the class survived at runtime.
import shutil
import tempfile
import unittest

import numpy as np
import pytest

from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch


if is_vision_available():
    from transformers import TvltImageProcessor

if is_speech_available():
    from transformers import TvltFeatureExtractor

from transformers import TvltProcessor


@require_torch
class TvltProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ZinengTang/tvlt-base"
        self.tmpdirname = tempfile.mkdtemp()

    def get_image_processor(self, **kwargs):
        return TvltImageProcessor.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return TvltFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = TvltProcessor.from_pretrained(self.tmpdirname)

        self.assertIsInstance(processor.feature_extractor, TvltFeatureExtractor)
        self.assertIsInstance(processor.image_processor, TvltImageProcessor)

    def test_feature_extractor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])

        audio_dict = feature_extractor(audio, return_tensors="np")
        input_processor = processor(audio=audio, return_tensors="np")

        # Processor must delegate audio to the feature extractor unchanged.
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        images = np.ones([3, 224, 224])

        image_dict = image_processor(images, return_tensors="np")
        input_processor = processor(images=images, return_tensors="np")

        # Processor must delegate images to the image processor unchanged.
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_processor(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        audio = np.ones([12000])
        images = np.ones([3, 224, 224])

        inputs = processor(audio=audio, images=images)

        self.assertListEqual(list(inputs.keys()), ["audio_values", "audio_mask", "pixel_values", "pixel_mask"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_model_input_names(self):
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()

        processor = TvltProcessor(image_processor=image_processor, feature_extractor=feature_extractor)

        self.assertListEqual(
            processor.model_input_names,
            image_processor.model_input_names + feature_extractor.model_input_names,
            msg="`processor` and `image_processor`+`feature_extractor` model input names do not match",
        )
87
# Tests for the `datasets` JSON reader/writer.  Reconstructed from an
# obfuscated dump in which every module-level function was named `lowercase_`
# (so each definition shadowed the previous one), `load_json_lines` decoded
# the wrong variable, and `pytest.raises` received a non-exception argument.
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_json_dataset(dataset, expected_features):
    """Assert the fixture dataset shape (4x3) and per-column dtypes."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_path, features=features, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def test_dataset_from_json_with_unsorted_column_names(features, jsonl_312_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def test_dataset_from_json_with_mismatched_features(jsonl_312_path, tmp_path):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    features = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    expected_features = features.copy()
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    cache_dir = tmp_path / "cache"
    dataset = JsonDatasetReader(jsonl_312_path, features=features, cache_dir=cache_dir).read()
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_json_split(split, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(jsonl_path, cache_dir=cache_dir, split=split).read()
    _check_json_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_json_path_type(path_type, jsonl_path, tmp_path):
    if issubclass(path_type, str):
        path = jsonl_path
    elif issubclass(path_type, list):
        path = [jsonl_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_dataset(dataset, expected_features)


def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Assert each requested split of the DatasetDict matches the fixture shape."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_json_keep_in_memory(keep_in_memory, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = JsonDatasetReader({"train": jsonl_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_datasetdict_from_json_features(features, jsonl_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = JsonDatasetReader({"train": jsonl_path}, features=features, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_json_splits(split, jsonl_path, tmp_path):
    if split:
        path = {split: jsonl_path}
    else:
        split = "train"
        path = {"train": jsonl_path, "test": jsonl_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = JsonDatasetReader(path, cache_dir=cache_dir).read()
    _check_json_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def load_json(buffer):
    """Parse a whole JSON document from a binary buffer."""
    return json.load(buffer)


def load_json_lines(buffer):
    """Parse one JSON document per line (JSON Lines) from a binary buffer."""
    return [json.loads(line) for line in buffer]


class TestJsonDatasetWriter:
    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    @pytest.mark.parametrize("lines, load_json_function", [(True, load_json_lines), (False, load_json)])
    def test_dataset_to_json_lines_multiproc(self, lines, load_json_function, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=lines, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json_function(buffer)
        assert isinstance(exported_content, list)
        assert isinstance(exported_content[0], dict)
        assert len(exported_content) == 10

    @pytest.mark.parametrize(
        "orient, container, keys, len_at",
        [
            ("records", list, {"tokens", "labels", "answers", "id"}, None),
            ("split", dict, {"columns", "data"}, "data"),
            ("index", dict, set("0123456789"), None),
            ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"),
            ("values", list, None, None),
            ("table", dict, {"schema", "data"}, "data"),
        ],
    )
    def test_dataset_to_json_orient_multiproc(self, orient, container, keys, len_at, dataset):
        with io.BytesIO() as buffer:
            JsonDatasetWriter(dataset, buffer, lines=False, orient=orient, num_proc=2).write()
            buffer.seek(0)
            exported_content = load_json(buffer)
        assert isinstance(exported_content, container)
        if keys:
            if container is dict:
                assert exported_content.keys() == keys
            else:
                assert exported_content[0].keys() == keys
        else:
            assert not hasattr(exported_content, "keys") and not hasattr(exported_content[0], "keys")
        if len_at:
            assert len(exported_content[len_at]) == 10
        else:
            assert len(exported_content) == 10

    def test_dataset_to_json_orient_invalidproc(self, dataset):
        # num_proc must be a positive integer.
        with pytest.raises(ValueError):
            with io.BytesIO() as buffer:
                JsonDatasetWriter(dataset, buffer, num_proc=0)

    @pytest.mark.parametrize("compression, extension", [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")])
    def test_dataset_to_json_compression(self, shared_datadir, tmp_path_factory, extension, compression, dataset):
        path = tmp_path_factory.mktemp("data") / f"test.json.{extension}"
        original_path = str(shared_datadir / f"test_file.json.{extension}")
        JsonDatasetWriter(dataset, path, compression=compression).write()

        with fsspec.open(path, "rb", compression="infer") as f:
            exported_content = f.read()
        with fsspec.open(original_path, "rb", compression="infer") as f:
            original_content = f.read()
        assert exported_content == original_content
87
1
# Mangled/obfuscated copy of transformers' modeling_mobilenet_v1.py.
# NOTE(review): the obfuscation pass renamed every binding to `lowercase_`/`lowercase__`/
# `snake_case_`/`__A` and every docstring constant to `UpperCamelCase`, so assignments no
# longer match the names read later (e.g. `backbone`, `tf_to_pt_map`) and several defs have
# duplicated parameter names — this file is NOT runnable as-is; confirm against upstream.
from typing import Optional, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithPoolingAndNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_mobilenet_va import MobileNetVaConfig


# Module-level logger (the mangling collapsed all of these constants onto one name).
UpperCamelCase = logging.get_logger(__name__)

# General docstring
UpperCamelCase = '''MobileNetV1Config'''

# Base docstring
UpperCamelCase = '''google/mobilenet_v1_1.0_224'''
UpperCamelCase = [1, 1024, 7, 7]

# Image classification docstring
UpperCamelCase = '''google/mobilenet_v1_1.0_224'''
UpperCamelCase = '''tabby, tabby cat'''

UpperCamelCase = [
    '''google/mobilenet_v1_1.0_224''',
    '''google/mobilenet_v1_0.75_192''',
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
]


def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : List[Any] , _lowerCamelCase : str=None):
    # Build a dict mapping TensorFlow checkpoint variable prefixes to PyTorch parameters.
    # Walks the stem, then 13 (depthwise, pointwise) layer pairs, then (if the model has a
    # classification head) the logits conv. NOTE(review): reads of `backbone`, `pointer`,
    # `pt_index`, `tf_index`, `tf_to_pt_map` refer to the pre-mangling binding names.
    lowercase__ : Optional[int] = {}
    if isinstance(_lowerCamelCase , _lowerCamelCase):
        lowercase__ : Any = model.mobilenet_va
    else:
        lowercase__ : List[str] = model
    # Stem: conv weight plus the four BatchNorm buffers/params.
    lowercase__ : List[str] = "MobilenetV1/Conv2d_0/"
    lowercase__ : Any = backbone.conv_stem.convolution.weight
    lowercase__ : Optional[Any] = backbone.conv_stem.normalization.bias
    lowercase__ : Any = backbone.conv_stem.normalization.weight
    lowercase__ : Dict = backbone.conv_stem.normalization.running_mean
    lowercase__ : Optional[Any] = backbone.conv_stem.normalization.running_var
    for i in range(13):
        # TF numbers layers 1..13; each maps to two PyTorch layers (depthwise at 2i, pointwise at 2i+1).
        lowercase__ : Tuple = i + 1
        lowercase__ : int = i * 2
        lowercase__ : Optional[Any] = backbone.layer[pt_index]
        lowercase__ : Optional[int] = f'''MobilenetV1/Conv2d_{tf_index}_depthwise/'''
        lowercase__ : Dict = pointer.convolution.weight
        lowercase__ : str = pointer.normalization.bias
        lowercase__ : Dict = pointer.normalization.weight
        lowercase__ : str = pointer.normalization.running_mean
        lowercase__ : Dict = pointer.normalization.running_var
        lowercase__ : Union[str, Any] = backbone.layer[pt_index + 1]
        lowercase__ : str = f'''MobilenetV1/Conv2d_{tf_index}_pointwise/'''
        lowercase__ : int = pointer.convolution.weight
        lowercase__ : Optional[Any] = pointer.normalization.bias
        lowercase__ : Tuple = pointer.normalization.weight
        lowercase__ : Dict = pointer.normalization.running_mean
        lowercase__ : Optional[int] = pointer.normalization.running_var
    if isinstance(_lowerCamelCase , _lowerCamelCase):
        # Classification head: 1x1 logits conv.
        lowercase__ : str = "MobilenetV1/Logits/Conv2d_1c_1x1/"
        lowercase__ : List[Any] = model.classifier.weight
        lowercase__ : int = model.classifier.bias
    return tf_to_pt_map


def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : Tuple):
    # Load a TF MobileNetV1 checkpoint into a PyTorch model, using the map built above.
    # TF/numpy are imported lazily so TF is only required when actually converting.
    try:
        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions.")
        raise

    # Load weights from TF model
    lowercase__ : Optional[Any] = tf.train.list_variables(_lowerCamelCase)
    lowercase__ : Any = {}
    for name, shape in init_vars:
        logger.info(f'''Loading TF weight {name} with shape {shape}''')
        lowercase__ : Any = tf.train.load_variable(_lowerCamelCase , _lowerCamelCase)
        lowercase__ : Tuple = array

    # Build TF to PyTorch weights loading map
    lowercase__ : int = _build_tf_to_pytorch_map(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase)

    for name, pointer in tf_to_pt_map.items():
        logger.info(f'''Importing {name}''')
        if name not in tf_weights:
            logger.info(f'''{name} not in tf pre-trained weights, skipping''')
            continue
        lowercase__ : Tuple = tf_weights[name]
        if "depthwise_weights" in name:
            # TF depthwise layout (H, W, in, mult) -> PyTorch (in, mult, H, W).
            logger.info("Transposing depthwise")
            lowercase__ : int = np.transpose(_lowerCamelCase , (2, 3, 0, 1))
        elif "weights" in name:
            logger.info("Transposing")
            if len(pointer.shape) == 2:  # copying into linear layer
                lowercase__ : List[Any] = array.squeeze().transpose()
            else:
                # TF conv layout (H, W, in, out) -> PyTorch (out, in, H, W).
                lowercase__ : List[Any] = np.transpose(_lowerCamelCase , (3, 2, 0, 1))
        if pointer.shape != array.shape:
            raise ValueError(f'''Pointer shape {pointer.shape} and array shape {array.shape} mismatched''')
        logger.info(f'''Initialize PyTorch weight {name} {array.shape}''')
        lowercase__ : Tuple = torch.from_numpy(_lowerCamelCase)
        # Drop the weight and its optimizer/EMA slots so leftovers can be reported below.
        tf_weights.pop(_lowerCamelCase , _lowerCamelCase)
        tf_weights.pop(name + "/RMSProp" , _lowerCamelCase)
        tf_weights.pop(name + "/RMSProp_1" , _lowerCamelCase)
        tf_weights.pop(name + "/ExponentialMovingAverage" , _lowerCamelCase)

    logger.info(f'''Weights not copied to PyTorch model: {", ".join(tf_weights.keys())}''')
    return model


def lowercase_ ( _lowerCamelCase : torch.Tensor , _lowerCamelCase : nn.Convad):
    # Apply TensorFlow-style "SAME" padding ahead of a conv: pad amount depends on the
    # input size modulo the stride, with the extra pixel going to the right/bottom.
    lowercase__ , lowercase__ : Optional[Any] = features.shape[-2:]
    lowercase__ , lowercase__ : int = conv_layer.stride
    lowercase__ , lowercase__ : Optional[Any] = conv_layer.kernel_size

    if in_height % stride_height == 0:
        lowercase__ : Union[str, Any] = max(kernel_height - stride_height , 0)
    else:
        lowercase__ : List[str] = max(kernel_height - (in_height % stride_height) , 0)

    if in_width % stride_width == 0:
        lowercase__ : List[Any] = max(kernel_width - stride_width , 0)
    else:
        lowercase__ : Optional[int] = max(kernel_width - (in_width % stride_width) , 0)

    lowercase__ : Tuple = pad_along_width // 2
    lowercase__ : Tuple = pad_along_width - pad_left
    lowercase__ : List[Any] = pad_along_height // 2
    lowercase__ : List[Any] = pad_along_height - pad_top

    # nn.functional.pad takes (left, right, top, bottom) for the last two dims.
    lowercase__ : Dict = (pad_left, pad_right, pad_top, pad_bottom)
    return nn.functional.pad(_lowerCamelCase , _lowerCamelCase , "constant" , 0.0)


class snake_case_ ( nn.Module ):
    # Conv + optional BatchNorm + optional activation building block (MobileNetV1ConvLayer).

    def __init__( self : List[str] , lowercase_ : MobileNetVaConfig , lowercase_ : int , lowercase_ : int , lowercase_ : int , lowercase_ : Optional[int] = 1 , lowercase_ : Optional[int] = 1 , lowercase_ : bool = False , lowercase_ : Optional[bool] = True , lowercase_ : Optional[bool or str] = True , ) -> None:
        super().__init__()
        lowercase__ : int = config

        if in_channels % groups != 0:
            raise ValueError(F'''Input channels ({in_channels}) are not divisible by {groups} groups.''' )
        if out_channels % groups != 0:
            raise ValueError(F'''Output channels ({out_channels}) are not divisible by {groups} groups.''' )

        # With TF padding the conv itself pads nothing; apply_tf_padding does it in forward.
        lowercase__ : Optional[int] = 0 if config.tf_padding else int((kernel_size - 1) / 2 )

        lowercase__ : str = nn.Convad(
            in_channels=lowercase_ ,
            out_channels=lowercase_ ,
            kernel_size=lowercase_ ,
            stride=lowercase_ ,
            padding=lowercase_ ,
            groups=lowercase_ ,
            bias=lowercase_ ,
            padding_mode="zeros" , )

        if use_normalization:
            lowercase__ : Dict = nn.BatchNormad(
                num_features=lowercase_ ,
                eps=config.layer_norm_eps ,
                momentum=0.99_97 ,
                affine=lowercase_ ,
                track_running_stats=lowercase_ , )
        else:
            lowercase__ : Optional[int] = None

        if use_activation:
            # use_activation may be a string naming an activation, True (fall back to the
            # config's hidden_act), or an activation module itself.
            if isinstance(lowercase_ , lowercase_ ):
                lowercase__ : str = ACTaFN[use_activation]
            elif isinstance(config.hidden_act , lowercase_ ):
                lowercase__ : int = ACTaFN[config.hidden_act]
            else:
                lowercase__ : Tuple = config.hidden_act
        else:
            lowercase__ : Optional[Any] = None

    def __UpperCamelCase ( self : Any , lowercase_ : torch.Tensor ) -> torch.Tensor:
        # forward: optional TF-style padding -> conv -> optional BN -> optional activation.
        if self.config.tf_padding:
            lowercase__ : int = apply_tf_padding(lowercase_ , self.convolution )
        lowercase__ : List[str] = self.convolution(lowercase_ )
        if self.normalization is not None:
            lowercase__ : Any = self.normalization(lowercase_ )
        if self.activation is not None:
            lowercase__ : Any = self.activation(lowercase_ )
        return features


class snake_case_ ( __A ):
    # PreTrainedModel subclass: wires config class, TF loader and weight init.
    __A : int = MobileNetVaConfig
    __A : List[Any] = load_tf_weights_in_mobilenet_va
    __A : Tuple = "mobilenet_v1"
    __A : str = "pixel_values"
    __A : Dict = False

    def __UpperCamelCase ( self : Any , lowercase_ : Union[nn.Linear, nn.Convad] ) -> None:
        # _init_weights: truncated-normal-style init for linear/conv, identity for BatchNorm.
        if isinstance(lowercase_ , (nn.Linear, nn.Convad) ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(lowercase_ , nn.BatchNormad ):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0 )


UpperCamelCase = R'''
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`MobileNetV1Config`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''

UpperCamelCase = R'''
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`MobileNetV1ImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''


@add_start_docstrings(
    "The bare MobileNetV1 model outputting raw hidden-states without any specific head on top." ,__A ,)
class snake_case_ ( __A ):
    # Backbone: stem conv + 13 (depthwise, pointwise) pairs + optional global average pool.

    def __init__( self : Optional[int] , lowercase_ : MobileNetVaConfig , lowercase_ : bool = True ) -> Union[str, Any]:
        super().__init__(lowercase_ )
        lowercase__ : str = config
        lowercase__ : List[Any] = 32  # base channel count, scaled by depth_multiplier below
        lowercase__ : Union[str, Any] = max(int(depth * config.depth_multiplier ) , config.min_depth )

        lowercase__ : Optional[int] = MobileNetVaConvLayer(
            lowercase_ , in_channels=config.num_channels , out_channels=lowercase_ , kernel_size=3 , stride=2 , )

        # Per-pair strides of the 13 depthwise convolutions.
        lowercase__ : Tuple = [1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 2, 1]

        lowercase__ : Any = nn.ModuleList()
        for i in range(13 ):
            lowercase__ : List[Any] = out_channels

            # Channel count doubles at each stride-2 stage (and after the stem).
            if strides[i] == 2 or i == 0:
                depth *= 2
                lowercase__ : str = max(int(depth * config.depth_multiplier ) , config.min_depth )

            # Depthwise 3x3 (groups == in_channels) followed by pointwise 1x1.
            self.layer.append(
                MobileNetVaConvLayer(
                    lowercase_ , in_channels=lowercase_ , out_channels=lowercase_ , kernel_size=3 , stride=strides[i] , groups=lowercase_ , ) )

            self.layer.append(
                MobileNetVaConvLayer(
                    lowercase_ , in_channels=lowercase_ , out_channels=lowercase_ , kernel_size=1 , ) )

        lowercase__ : Optional[Any] = nn.AdaptiveAvgPoolad((1, 1) ) if add_pooling_layer else None

        # Initialize weights and apply final processing
        self.post_init()

    def __UpperCamelCase ( self : Any , lowercase_ : Dict ) -> Optional[int]:
        # _prune_heads is not supported for this architecture.
        raise NotImplementedError

    @add_start_docstrings_to_model_forward(lowercase_ )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def __UpperCamelCase ( self : int , lowercase_ : Optional[torch.Tensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , ) -> Union[tuple, BaseModelOutputWithPoolingAndNoAttention]:
        # forward: run the stem and all layers, optionally collecting per-layer hidden states
        # and a flattened pooled output.
        lowercase__ : str = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        lowercase__ : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values" )

        lowercase__ : Dict = self.conv_stem(lowercase_ )

        lowercase__ : Union[str, Any] = () if output_hidden_states else None

        for i, layer_module in enumerate(self.layer ):
            lowercase__ : List[Any] = layer_module(lowercase_ )

            if output_hidden_states:
                lowercase__ : Optional[Any] = all_hidden_states + (hidden_states,)

        lowercase__ : int = hidden_states

        if self.pooler is not None:
            lowercase__ : Any = torch.flatten(self.pooler(lowercase_ ) , start_dim=1 )
        else:
            lowercase__ : List[str] = None

        if not return_dict:
            return tuple(v for v in [last_hidden_state, pooled_output, all_hidden_states] if v is not None )

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=lowercase_ , pooler_output=lowercase_ , hidden_states=lowercase_ , )


@add_start_docstrings(
    "\n    MobileNetV1 model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    " ,__A ,)
class snake_case_ ( __A ):
    # Image-classification head: backbone -> dropout -> linear over pooled features.

    def __init__( self : Optional[Any] , lowercase_ : MobileNetVaConfig ) -> None:
        super().__init__(lowercase_ )

        lowercase__ : int = config.num_labels
        lowercase__ : Optional[int] = MobileNetVaModel(lowercase_ )

        # Width of the last pointwise conv feeds the classifier.
        lowercase__ : Optional[int] = self.mobilenet_va.layer[-1].convolution.out_channels

        # Classifier head
        lowercase__ : Optional[Any] = nn.Dropout(config.classifier_dropout_prob , inplace=lowercase_ )
        lowercase__ : Dict = nn.Linear(lowercase_ , config.num_labels ) if config.num_labels > 0 else nn.Identity()

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(lowercase_ )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=lowercase_ , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def __UpperCamelCase ( self : Any , lowercase_ : Optional[torch.Tensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[torch.Tensor] = None , lowercase_ : Optional[bool] = None , ) -> Union[tuple, ImageClassifierOutputWithNoAttention]:
        # forward: classify; when labels are given, infer problem_type from num_labels and
        # label dtype, then pick the matching loss (MSE / CE / BCE-with-logits).
        lowercase__ : Dict = return_dict if return_dict is not None else self.config.use_return_dict

        lowercase__ : Any = self.mobilenet_va(lowercase_ , output_hidden_states=lowercase_ , return_dict=lowercase_ )

        lowercase__ : int = outputs.pooler_output if return_dict else outputs[1]

        lowercase__ : Any = self.classifier(self.dropout(lowercase_ ) )

        lowercase__ : Optional[int] = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    lowercase__ : Dict = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    lowercase__ : Optional[Any] = "single_label_classification"
                else:
                    lowercase__ : Optional[int] = "multi_label_classification"

            if self.config.problem_type == "regression":
                lowercase__ : Tuple = MSELoss()
                if self.num_labels == 1:
                    lowercase__ : List[str] = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    lowercase__ : Optional[int] = loss_fct(lowercase_ , lowercase_ )
            elif self.config.problem_type == "single_label_classification":
                lowercase__ : Any = CrossEntropyLoss()
                lowercase__ : Optional[int] = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                lowercase__ : Optional[Any] = BCEWithLogitsLoss()
                lowercase__ : Tuple = loss_fct(lowercase_ , lowercase_ )

        if not return_dict:
            lowercase__ : List[Any] = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(
            loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states , )
87
# Mangled/obfuscated copy of transformers' LayoutLMv3 processor.
# NOTE(review): bindings were renamed to `lowercase_`/`lowercase__`; reads such as
# `image_processor`, `tokenizer`, `features`, `encoded_inputs` refer to the pre-mangling
# names, and the method signatures repeat `lowercase_` — not runnable as-is.
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class snake_case_ ( __A ):
    # Processor combining a LayoutLMv3 image processor (optionally with OCR) and tokenizer.
    __A : Optional[Any] = ["image_processor", "tokenizer"]
    __A : Tuple = "LayoutLMv3ImageProcessor"
    __A : List[Any] = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__( self : Union[str, Any] , lowercase_ : int=None , lowercase_ : str=None , **lowercase_ : Optional[Any] ) -> Optional[int]:
        # Accept the deprecated `feature_extractor` kwarg as a fallback for `image_processor`.
        lowercase__ : Union[str, Any] = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , lowercase_ , )
            lowercase__ : Optional[int] = kwargs.pop("feature_extractor" )

        lowercase__ : int = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )

        super().__init__(lowercase_ , lowercase_ )

    def __call__( self : Dict , lowercase_ : Optional[Any] , lowercase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , lowercase_ : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , lowercase_ : Union[List[List[int]], List[List[List[int]]]] = None , lowercase_ : Optional[Union[List[int], List[List[int]]]] = None , lowercase_ : bool = True , lowercase_ : Union[bool, str, PaddingStrategy] = False , lowercase_ : Union[bool, str, TruncationStrategy] = None , lowercase_ : Optional[int] = None , lowercase_ : int = 0 , lowercase_ : Optional[int] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = True , lowercase_ : Optional[Union[str, TensorType]] = None , **lowercase_ : Dict , ) -> BatchEncoding:
        # Run the image processor (which may OCR words/boxes), then the tokenizer, and
        # attach pixel values to the resulting encoding.
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )

        # first, apply the image processor
        lowercase__ : Union[str, Any] = self.image_processor(images=lowercase_ , return_tensors=lowercase_ )

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(lowercase_ , lowercase_ ):
                # add batch dimension (as the image processor always adds a batch dimension)
                lowercase__ : Optional[Any] = [text]
            lowercase__ : Any = features["words"]

        lowercase__ : Tuple = self.tokenizer(
            text=text if text is not None else features["words"] ,
            text_pair=text_pair if text_pair is not None else None ,
            boxes=boxes if boxes is not None else features["boxes"] ,
            word_labels=lowercase_ ,
            add_special_tokens=lowercase_ ,
            padding=lowercase_ ,
            truncation=lowercase_ ,
            max_length=lowercase_ ,
            stride=lowercase_ ,
            pad_to_multiple_of=lowercase_ ,
            return_token_type_ids=lowercase_ ,
            return_attention_mask=lowercase_ ,
            return_overflowing_tokens=lowercase_ ,
            return_special_tokens_mask=lowercase_ ,
            return_offsets_mapping=lowercase_ ,
            return_length=lowercase_ ,
            verbose=lowercase_ ,
            return_tensors=lowercase_ ,
            **lowercase_ , )

        # add pixel values
        lowercase__ : Optional[int] = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            # Duplicate images so each overflowed segment keeps its source image.
            lowercase__ : Dict = self.get_overflowing_images(lowercase_ , encoded_inputs["overflow_to_sample_mapping"] )
        lowercase__ : str = images

        return encoded_inputs

    def __UpperCamelCase ( self : List[Any] , lowercase_ : Tuple , lowercase_ : List[Any] ) -> Dict:
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        lowercase__ : Tuple = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )

        if len(lowercase_ ) != len(lowercase_ ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                F''' {len(lowercase_ )} and {len(lowercase_ )}''' )

        return images_with_overflow

    def __UpperCamelCase ( self : int , *lowercase_ : Union[str, Any] , **lowercase_ : List[str] ) -> Union[str, Any]:
        # batch_decode: forwarded to the tokenizer.
        return self.tokenizer.batch_decode(*lowercase_ , **lowercase_ )

    def __UpperCamelCase ( self : Union[str, Any] , *lowercase_ : str , **lowercase_ : int ) -> Dict:
        # decode: forwarded to the tokenizer.
        return self.tokenizer.decode(*lowercase_ , **lowercase_ )

    @property
    def __UpperCamelCase ( self : Any ) -> Any:
        # model_input_names expected by LayoutLMv3 models.
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def __UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , lowercase_ , )
        return self.image_processor_class

    @property
    def __UpperCamelCase ( self : List[Any] ) -> Tuple:
        # Deprecated alias kept for backward compatibility.
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , lowercase_ , )
        return self.image_processor
87
1
# Mangled/obfuscated copy of diffusers' TransformerTemporalModel.
# NOTE(review): bindings were renamed to `lowercase_`/`lowercase__`; later reads
# (`hidden_states`, `residual`, `batch_size`, etc.) use the pre-mangling names, and
# `-> Tuple` in __init__ references a typing name this file never imports — not runnable as-is.
from dataclasses import dataclass
from typing import Optional

import torch
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin


@dataclass
class snake_case_ ( __A ):
    # Output container for the temporal transformer; single field holds the sample tensor.
    __A : torch.FloatTensor


class snake_case_ ( __A ,__A ):
    # Transformer applied along the frame (temporal) axis of video latents.

    @register_to_config
    def __init__( self : Union[str, Any] , lowercase_ : int = 16 , lowercase_ : int = 88 , lowercase_ : Optional[int] = None , lowercase_ : Optional[int] = None , lowercase_ : int = 1 , lowercase_ : float = 0.0 , lowercase_ : int = 32 , lowercase_ : Optional[int] = None , lowercase_ : bool = False , lowercase_ : Optional[int] = None , lowercase_ : str = "geglu" , lowercase_ : bool = True , lowercase_ : bool = True , ) -> Tuple:
        super().__init__()
        lowercase__ : Optional[int] = num_attention_heads
        lowercase__ : Optional[int] = attention_head_dim
        lowercase__ : Dict = num_attention_heads * attention_head_dim

        lowercase__ : int = in_channels

        # GroupNorm over channels, then a linear projection into the attention width.
        lowercase__ : Optional[int] = torch.nn.GroupNorm(num_groups=lowercase_ , num_channels=lowercase_ , eps=1E-6 , affine=lowercase_ )
        lowercase__ : Tuple = nn.Linear(lowercase_ , lowercase_ )

        # 3. Define transformers blocks
        lowercase__ : Optional[Any] = nn.ModuleList(
            [
                BasicTransformerBlock(
                    lowercase_ ,
                    lowercase_ ,
                    lowercase_ ,
                    dropout=lowercase_ ,
                    cross_attention_dim=lowercase_ ,
                    activation_fn=lowercase_ ,
                    attention_bias=lowercase_ ,
                    double_self_attention=lowercase_ ,
                    norm_elementwise_affine=lowercase_ , )
                for d in range(lowercase_ )
            ] )

        lowercase__ : List[Any] = nn.Linear(lowercase_ , lowercase_ )

    def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Any , lowercase_ : Tuple=None , lowercase_ : Union[str, Any]=None , lowercase_ : List[str]=None , lowercase_ : int=1 , lowercase_ : str=None , lowercase_ : bool = True , ) -> Dict:
        # forward: reshape (batch*frames, C, H, W) so attention runs across frames for each
        # spatial location, apply the blocks, undo the reshape, and add the residual.
        lowercase__ , lowercase__ , lowercase__ , lowercase__ : Union[str, Any] = hidden_states.shape
        lowercase__ : Optional[int] = batch_frames // num_frames

        lowercase__ : Union[str, Any] = hidden_states
        lowercase__ : Optional[int] = hidden_states[None, :].reshape(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
        lowercase__ : Optional[int] = hidden_states.permute(0 , 2 , 1 , 3 , 4 )

        lowercase__ : List[Any] = self.norm(lowercase_ )
        # Collapse spatial dims into the batch so the sequence axis is the frame axis.
        lowercase__ : List[str] = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , lowercase_ , lowercase_ )

        lowercase__ : List[Any] = self.proj_in(lowercase_ )

        # 2. Blocks
        for block in self.transformer_blocks:
            lowercase__ : Optional[Any] = block(
                lowercase_ ,
                encoder_hidden_states=lowercase_ ,
                timestep=lowercase_ ,
                cross_attention_kwargs=lowercase_ ,
                class_labels=lowercase_ , )

        # 3. Output
        lowercase__ : Optional[int] = self.proj_out(lowercase_ )
        lowercase__ : List[Any] = (
            hidden_states[None, None, :]
            .reshape(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
            .permute(0 , 3 , 4 , 1 , 2 )
            .contiguous()
        )
        lowercase__ : Any = hidden_states.reshape(lowercase_ , lowercase_ , lowercase_ , lowercase_ )

        lowercase__ : List[str] = hidden_states + residual

        if not return_dict:
            return (output,)

        return TransformerTemporalModelOutput(sample=lowercase_ )
87
# Mangled/obfuscated copy of a CLIP-style image processor (uses OPENAI_CLIP_MEAN/STD).
# NOTE(review): bindings were renamed to `lowercase_`/`lowercase__`; later reads (`size`,
# `images`, `do_resize`, ...) use the pre-mangling names and signatures repeat
# `lowercase_` — not runnable as-is; confirm against upstream transformers.
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


UpperCamelCase = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class snake_case_ ( __A ):
    # Image processor pipeline: RGB convert -> resize -> center crop -> rescale -> normalize.
    __A : str = ["pixel_values"]

    def __init__( self : int , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : bool = True , lowercase_ : Dict[str, int] = None , lowercase_ : bool = True , lowercase_ : Union[int, float] = 1 / 2_55 , lowercase_ : bool = True , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = True , **lowercase_ : Union[str, Any] , ) -> None:
        super().__init__(**lowercase_ )
        # Defaults: shortest edge 224, 224x224 crop, CLIP normalization statistics.
        lowercase__ : Tuple = size if size is not None else {"shortest_edge": 2_24}
        lowercase__ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ )
        lowercase__ : List[str] = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        lowercase__ : Tuple = get_size_dict(lowercase_ , default_to_square=lowercase_ , param_name="crop_size" )

        lowercase__ : Dict = do_resize
        lowercase__ : List[Any] = size
        lowercase__ : int = resample
        lowercase__ : Union[str, Any] = do_center_crop
        lowercase__ : Optional[int] = crop_size
        lowercase__ : List[str] = do_rescale
        lowercase__ : int = rescale_factor
        lowercase__ : List[Any] = do_normalize
        lowercase__ : Union[str, Any] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        lowercase__ : str = image_std if image_std is not None else OPENAI_CLIP_STD
        lowercase__ : Dict = do_convert_rgb

    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : PILImageResampling = PILImageResampling.BICUBIC , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Union[str, Any] , ) -> np.ndarray:
        # resize: scale so the shortest edge matches size["shortest_edge"], preserving aspect.
        lowercase__ : str = get_size_dict(lowercase_ , default_to_square=lowercase_ )
        if "shortest_edge" not in size:
            raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        lowercase__ : Dict = get_resize_output_image_size(lowercase_ , size=size["shortest_edge"] , default_to_square=lowercase_ )
        return resize(lowercase_ , size=lowercase_ , resample=lowercase_ , data_format=lowercase_ , **lowercase_ )

    def __UpperCamelCase ( self : int , lowercase_ : np.ndarray , lowercase_ : Dict[str, int] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : int , ) -> np.ndarray:
        # center_crop: crop to size["height"] x size["width"] around the image center.
        lowercase__ : Optional[Any] = get_size_dict(lowercase_ )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(lowercase_ , size=(size["height"], size["width"]) , data_format=lowercase_ , **lowercase_ )

    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : np.ndarray , lowercase_ : Union[int, float] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : Optional[Any] , ) -> Any:
        # rescale: multiply pixel values by `scale` (typically 1/255).
        return rescale(lowercase_ , scale=lowercase_ , data_format=lowercase_ , **lowercase_ )

    def __UpperCamelCase ( self : str , lowercase_ : np.ndarray , lowercase_ : Union[float, List[float]] , lowercase_ : Union[float, List[float]] , lowercase_ : Optional[Union[str, ChannelDimension]] = None , **lowercase_ : str , ) -> np.ndarray:
        # normalize: per-channel (x - mean) / std.
        return normalize(lowercase_ , mean=lowercase_ , std=lowercase_ , data_format=lowercase_ , **lowercase_ )

    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : ImageInput , lowercase_ : bool = None , lowercase_ : Dict[str, int] = None , lowercase_ : PILImageResampling = None , lowercase_ : bool = None , lowercase_ : int = None , lowercase_ : bool = None , lowercase_ : float = None , lowercase_ : bool = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : Optional[Union[float, List[float]]] = None , lowercase_ : bool = None , lowercase_ : Optional[Union[str, TensorType]] = None , lowercase_ : Optional[ChannelDimension] = ChannelDimension.FIRST , **lowercase_ : Union[str, Any] , ) -> PIL.Image.Image:
        # preprocess: resolve per-call overrides against instance defaults, validate, then
        # run the full transform pipeline and pack a BatchFeature.
        lowercase__ : int = do_resize if do_resize is not None else self.do_resize
        lowercase__ : Dict = size if size is not None else self.size
        lowercase__ : List[Any] = get_size_dict(lowercase_ , param_name="size" , default_to_square=lowercase_ )
        lowercase__ : Dict = resample if resample is not None else self.resample
        lowercase__ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
        lowercase__ : Dict = crop_size if crop_size is not None else self.crop_size
        lowercase__ : List[str] = get_size_dict(lowercase_ , param_name="crop_size" , default_to_square=lowercase_ )
        lowercase__ : int = do_rescale if do_rescale is not None else self.do_rescale
        lowercase__ : Dict = rescale_factor if rescale_factor is not None else self.rescale_factor
        lowercase__ : Union[str, Any] = do_normalize if do_normalize is not None else self.do_normalize
        lowercase__ : int = image_mean if image_mean is not None else self.image_mean
        lowercase__ : List[str] = image_std if image_std is not None else self.image_std
        lowercase__ : Union[str, Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        lowercase__ : Union[str, Any] = make_list_of_images(lowercase_ )

        if not valid_images(lowercase_ ):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray." )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True." )

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True." )

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True." )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True." )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            lowercase__ : Dict = [convert_to_rgb(lowercase_ ) for image in images]

        # All transformations expect numpy arrays.
        lowercase__ : Optional[Any] = [to_numpy_array(lowercase_ ) for image in images]

        if do_resize:
            lowercase__ : List[Any] = [self.resize(image=lowercase_ , size=lowercase_ , resample=lowercase_ ) for image in images]

        if do_center_crop:
            lowercase__ : int = [self.center_crop(image=lowercase_ , size=lowercase_ ) for image in images]

        if do_rescale:
            lowercase__ : str = [self.rescale(image=lowercase_ , scale=lowercase_ ) for image in images]

        if do_normalize:
            lowercase__ : Optional[int] = [self.normalize(image=lowercase_ , mean=lowercase_ , std=lowercase_ ) for image in images]

        lowercase__ : Optional[Any] = [to_channel_dimension_format(lowercase_ , lowercase_ ) for image in images]

        lowercase__ : List[str] = {"pixel_values": images}
        return BatchFeature(data=lowercase_ , tensor_type=lowercase_ )
87
1
import copy

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING


# Module logger; the body below logs through this name.
logger = logging.get_logger(__name__)
UpperCamelCase = logger  # keep the obfuscated alias for any external reference


class snake_case_ ( PretrainedConfig ):
    """Configuration for a UPerNet semantic-segmentation model.

    Wraps a backbone config (defaulting to ResNet) plus the UPerNet decode /
    auxiliary-head hyper-parameters.  The base class was unresolved (`__A`)
    in the obfuscated source; ``PretrainedConfig`` is the only config base
    imported by this file.
    """

    # ``to_dict`` below reads ``self.__class__.model_type``, so the attribute
    # must exist on the class.
    model_type = "upernet"

    def __init__(
        self,
        backbone_config=None,          # dict, backbone config object, or None for the default ResNet
        hidden_size=5_12,
        initializer_range=0.02,
        pool_scales=None,              # defaults to [1, 2, 3, 6]; None avoids a shared mutable default
        use_auxiliary_head=True,
        auxiliary_loss_weight=0.4,
        auxiliary_in_channels=3_84,
        auxiliary_channels=2_56,
        auxiliary_num_convs=1,
        auxiliary_concat_input=False,
        loss_ignore_index=2_55,        # label id excluded from the loss
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)

        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
        elif isinstance(backbone_config, dict):
            # Rebuild a typed config object from a plain dict.
            backbone_model_type = backbone_config.get("model_type" )
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config )

        self.backbone_config = backbone_config
        self.hidden_size = hidden_size
        self.initializer_range = initializer_range
        self.pool_scales = [1, 2, 3, 6] if pool_scales is None else list(pool_scales )
        self.use_auxiliary_head = use_auxiliary_head
        self.auxiliary_loss_weight = auxiliary_loss_weight
        self.auxiliary_in_channels = auxiliary_in_channels
        self.auxiliary_channels = auxiliary_channels
        self.auxiliary_num_convs = auxiliary_num_convs
        self.auxiliary_concat_input = auxiliary_concat_input
        self.loss_ignore_index = loss_ignore_index

    def to_dict(self):
        """Serialize to a plain dict, expanding the nested backbone config."""
        output = copy.deepcopy(self.__dict__ )
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
87
# Lazy-import module for the GPT-SW3 tokenizer: the tokenizer is only
# registered when sentencepiece is installed.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# The lazy-module machinery below reads this mapping; it must be defined
# (the obfuscated source bound it to a throwaway name, a NameError).
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece missing: expose no tokenizer symbols.
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    # Replace this module with a lazy proxy so the tokenizer is only
    # imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
1
# NOTE(review): this file is machine-obfuscated — every parameter is named
# ``lowercase_`` and every local ``lowercase__``, while the bodies still
# refer to the original, pre-obfuscation names.  The defs therefore do not
# compile as-is (duplicate argument names, unresolved locals).  Code is kept
# token-identical; comments reconstruct intent from the visible calls only.
import math
from typing import Any, Callable, List, Optional, Tuple, Union

import numpy as np
import torch

from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor

# The MelGAN vocoder runs through onnxruntime, so it is only importable when
# ONNX support is present.
if is_onnx_available():
    from ..onnx_utils import OnnxRuntimeModel

from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder

UpperCamelCase = logging.get_logger(__name__)  # pylint: disable=invalid-name
# Number of spectrogram frames per generated segment.  NOTE(review): this
# rebinds the name just assigned above (obfuscation artifact); the body
# below reads it as ``TARGET_FEATURE_LENGTH``.
UpperCamelCase = 256


class snake_case_ ( __A ):
    """Spectrogram-diffusion pipeline: encodes note tokens plus the previous
    mel chunk, denoises a new chunk with a DDPM scheduler, and optionally
    vocodes the result to audio with MelGAN.

    NOTE(review): the base ``__A`` is unresolved here — presumably
    ``DiffusionPipeline`` (imported above); confirm against the original.
    """

    # Components optional at construction time (melgan may be None).
    __A : str = ["melgan"]

    def __init__( self : str , lowercase_ : SpectrogramNotesEncoder , lowercase_ : SpectrogramContEncoder , lowercase_ : TaFilmDecoder , lowercase_ : DDPMScheduler , lowercase_ : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
        super().__init__()

        # From MELGAN
        lowercase__ : List[Any] = math.log(1E-5 )  # Matches MelGAN training.
        lowercase__ : str = 4.0  # Largest value for most examples
        lowercase__ : Any = 1_28  # presumably n_dims (number of mel bins) — see uses below

        self.register_modules(
            notes_encoder=lowercase_ , continuous_encoder=lowercase_ , decoder=lowercase_ , scheduler=lowercase_ , melgan=lowercase_ , )

    def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : Dict=False ) -> Optional[Any]:
        """Linearly map mel features from [min_value, max_value] into ``output_range``."""
        lowercase__ , lowercase__ : int = output_range
        if clip:
            lowercase__ : Optional[Any] = torch.clip(lowercase_ , self.min_value , self.max_value )
        # Scale to [0, 1].
        lowercase__ : List[str] = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def __UpperCamelCase ( self : Optional[int] , lowercase_ : List[str] , lowercase_ : List[str]=(-1.0, 1.0) , lowercase_ : List[Any]=False ) -> Union[str, Any]:
        """Inverse of the scaling above: map ``input_range`` back to feature units."""
        lowercase__ , lowercase__ : Tuple = input_range
        lowercase__ : Optional[Any] = torch.clip(lowercase_ , lowercase_ , lowercase_ ) if clip else outputs
        # Scale to [0, 1].
        lowercase__ : Union[str, Any] = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def __UpperCamelCase ( self : List[str] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Tuple ) -> List[str]:
        """Run both encoders over note tokens and continuous (mel) inputs."""
        # Non-padding positions (token id > 0) form the attention mask.
        lowercase__ : Optional[Any] = input_tokens > 0
        lowercase__ , lowercase__ : int = self.notes_encoder(
            encoder_input_tokens=lowercase_ , encoder_inputs_mask=lowercase_ )

        lowercase__ , lowercase__ : List[Any] = self.continuous_encoder(
            encoder_inputs=lowercase_ , encoder_inputs_mask=lowercase_ )

        # Pairs of (encoding, mask) consumed by the decoder.
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Optional[int] , lowercase_ : Optional[int] , lowercase_ : str ) -> Tuple:
        """One denoising step: decoder logits for ``input_tokens`` at ``noise_time``."""
        lowercase__ : Union[str, Any] = noise_time
        if not torch.is_tensor(lowercase_ ):
            # Scalar python number -> 1-element long tensor on the right device.
            lowercase__ : Optional[Any] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
        elif torch.is_tensor(lowercase_ ) and len(timesteps.shape ) == 0:
            lowercase__ : Optional[Any] = timesteps[None].to(input_tokens.device )

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        lowercase__ : int = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )

        lowercase__ : str = self.decoder(
            encodings_and_masks=lowercase_ , decoder_input_tokens=lowercase_ , decoder_noise_time=lowercase_ )
        return logits

    @torch.no_grad()
    def __call__( self : List[str] , lowercase_ : List[List[int]] , lowercase_ : Optional[torch.Generator] = None , lowercase_ : int = 1_00 , lowercase_ : bool = True , lowercase_ : str = "numpy" , lowercase_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowercase_ : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
        """Generate audio chunk-by-chunk, feeding each predicted mel chunk back
        in as context for the next, then (optionally) vocode with MelGAN."""
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(lowercase_ , lowercase_ ) or callback_steps <= 0)
        ):
            raise ValueError(
                F'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
                F''' {type(lowercase_ )}.''' )

        # Running buffers: current chunk, accumulated song, all-ones mask.
        lowercase__ : str = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
        lowercase__ : Optional[int] = np.zeros([1, 0, self.n_dims] , np.floataa )
        lowercase__ : str = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device )

        for i, encoder_input_tokens in enumerate(lowercase_ ):
            if i == 0:
                lowercase__ : Union[str, Any] = torch.from_numpy(pred_mel[:1].copy() ).to(
                    device=self.device , dtype=self.decoder.dtype )
                # The first chunk has no previous context.
                lowercase__ : List[str] = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=lowercase_ , device=self.device )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                lowercase__ : str = ones

            lowercase__ : str = self.scale_features(
                lowercase_ , output_range=[-1.0, 1.0] , clip=lowercase_ )

            lowercase__ : str = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=lowercase_ , continuous_mask=lowercase_ , )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            lowercase__ : List[str] = randn_tensor(
                shape=encoder_continuous_inputs.shape , generator=lowercase_ , device=self.device , dtype=self.decoder.dtype , )

            # set step values
            self.scheduler.set_timesteps(lowercase_ )

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
                lowercase__ : Optional[int] = self.decode(
                    encodings_and_masks=lowercase_ , input_tokens=lowercase_ , noise_time=t / self.scheduler.config.num_train_timesteps , )

                # Compute previous output: x_t -> x_t-1
                lowercase__ : Optional[Any] = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ ).prev_sample

            # Back to mel-feature units, then append the chunk to the song.
            lowercase__ : Tuple = self.scale_to_features(lowercase_ , input_range=[-1.0, 1.0] )
            lowercase__ : List[str] = mel[:1]
            lowercase__ : Optional[int] = mel.cpu().float().numpy()

            lowercase__ : str = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(lowercase_ , lowercase_ )

            logger.info("Generated segment" , lowercase_ )

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )

        if output_type == "numpy":
            # Vocode mel frames to a waveform with MelGAN.
            lowercase__ : Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
        else:
            lowercase__ : Dict = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=lowercase_ )
87
"""Project Euler 145: count "reversible" numbers.

A number n is reversible when n + reverse(n) has only odd digits; leading
zeroes are not allowed.  The count is built digit-pair by digit-pair from
the outside in, tracking only the carry (``remainder``).
"""

EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count reversible numbers of ``length`` digits, with the outer
    ``length - remaining_length`` digit pairs already fixed in ``digits``
    and ``remainder`` the carry flowing into the innermost pair.
    """
    if remaining_length == 0:
        # All digits placed: reject leading/trailing zero, then verify every
        # digit of n + reverse(n) is odd while propagating the carry outward.
        if digits[0] == 0 or digits[-1] == 0:
            return 0
        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Odd length: the middle digit is added to itself, so the incoming
        # carry must already be odd for the sum digit to be odd.
        if remainder % 2 == 0:
            return 0
        result = 0
        for digit in range(10):
            digits[length // 2] = digit
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    # Place the next outer pair: the pair sum plus carry must be odd, which
    # forces the second digit's parity given the first.
    result = 0
    for digit1 in range(10):
        digits[(length + remaining_length) // 2 - 1] = digit1
        if (remainder + digit1) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS
        for digit2 in other_parity_digits:
            digits[(length - remaining_length) // 2] = digit2
            result += reversible_numbers(
                remaining_length - 2, (remainder + digit1 + digit2) // 10, digits, length
            )
    return result


def solution(max_power: int = 9) -> int:
    """Return how many reversible numbers there are below 10**max_power."""
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
87
1
# Backward-compatibility shim: the real implementation lives in `diffusers`.
# Importing this module re-exports StableDiffusionInpaintPipeline and warns
# users to import it from `diffusers` directly.
import warnings

from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline  # noqa F401


warnings.warn(
    '''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
    ''' StableDiffusionInpaintPipeline` instead.'''
)
87
import sacrebleu as scb from packaging import version from sacrebleu import TER import datasets UpperCamelCase = '''\ @inproceedings{snover-etal-2006-study, title = "A Study of Translation Edit Rate with Targeted Human Annotation", author = "Snover, Matthew and Dorr, Bonnie and Schwartz, Rich and Micciulla, Linnea and Makhoul, John", booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers", month = aug # " 8-12", year = "2006", address = "Cambridge, Massachusetts, USA", publisher = "Association for Machine Translation in the Americas", url = "https://aclanthology.org/2006.amta-papers.25", pages = "223--231", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' UpperCamelCase = '''\ TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu (https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found here: https://github.com/jhclark/tercom. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information. 
''' UpperCamelCase = ''' Produces TER scores alongside the number of edits and reference length. Args: predictions (list of str): The system stream (a sequence of segments). references (list of list of str): A list of one or more reference streams (each a sequence of segments). normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`. support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters, as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana. Only applies if `normalized = True`. Defaults to `False`. case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`. Returns: \'score\' (float): TER score (num_edits / sum_ref_lengths * 100) \'num_edits\' (int): The cumulative number of edits \'ref_length\' (float): The cumulative average reference length Examples: Example 1: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... case_sensitive=True) >>> print(results) {\'score\': 150.0, \'num_edits\': 15, \'ref_length\': 10.0} Example 2: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... 
references=references, ... case_sensitive=True) >>> print(results) {\'score\': 62.5, \'num_edits\': 5, \'ref_length\': 8.0} Example 3: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... normalized=True, ... case_sensitive=True) >>> print(results) {\'score\': 57.14285714285714, \'num_edits\': 6, \'ref_length\': 10.5} Example 4: >>> predictions = ["does this sentence match??", ... "what about this sentence?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... case_sensitive=False) >>> print(results) {\'score\': 0.0, \'num_edits\': 0, \'ref_length\': 8.0} Example 5: >>> predictions = ["does this sentence match??", ... "what about this sentence?", ... "What did the TER metric user say to the developer?"] >>> references = [["does this sentence match", "does this sentence match!?!"], ... ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"], ... ["Your jokes are...", "...TERrible"]] >>> ter = datasets.load_metric("ter") >>> results = ter.compute(predictions=predictions, ... references=references, ... ignore_punct=True, ... 
 case_sensitive=False)
        >>> print(results)
        {\'score\': 100.0, \'num_edits\': 10, \'ref_length\': 10.0}
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION ,_KWARGS_DESCRIPTION )
class snake_case_ ( datasets.Metric ):
    """TER (Translation Edit Rate) metric backed by sacrebleu's TER scorer.

    NOTE(review): machine-obfuscated block — both methods below share the
    name ``__UpperCamelCase`` (the second definition overwrites the first at
    class-creation time), and the second one's parameters are all named
    ``lowercase_`` (duplicate argument names), so this does not compile
    as-is.  Presumably the originals were ``_info`` and ``_compute``.
    """

    def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
        # Metric metadata.  The TER scorer requires sacrebleu >= 1.4.12.
        if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                "You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
        return datasets.MetricInfo(
            description=_DESCRIPTION , citation=_CITATION , homepage="http://www.cs.umd.edu/~snover/tercom/" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
                {
                    "predictions": datasets.Value("string" , id="sequence" ),
                    "references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
                } ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"] , reference_urls=[
                "https://github.com/jhclark/tercom",
            ] , )

    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , ) -> Any:
        # Every prediction must come with the same number of references.
        lowercase__ : Optional[int] = len(references[0] )
        if any(len(lowercase_ ) != references_per_prediction for refs in references ):
            raise ValueError("Sacrebleu requires the same number of references for each prediction" )
        # Transpose references: sacrebleu wants one stream per reference slot.
        lowercase__ : Union[str, Any] = [[refs[i] for refs in references] for i in range(lowercase_ )]
        lowercase__ : str = TER(
            normalized=lowercase_ , no_punct=lowercase_ , asian_support=lowercase_ , case_sensitive=lowercase_ , )
        lowercase__ : List[str] = sb_ter.corpus_score(lowercase_ , lowercase_ )
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
87
1
"""Read/write the YAML metadata block at the top of a dataset README.md."""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple

import yaml


class _NoDuplicateSafeLoader(yaml.SafeLoader):
    """SafeLoader that rejects mappings containing duplicate keys."""

    def _check_no_duplicates_on_constructed_node(self, node) -> None:
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # Lists are unhashable; convert to tuples so Counter can count them.
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(F'''Got duplicate yaml keys: {duplicate_keys}''' )

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


def _split_yaml_from_readme(readme_content: str) -> Tuple[Optional[str], str]:
    """Split a README into (yaml_block, rest).

    The YAML block is the text between a leading ``---`` line and the next
    ``---`` line; returns (None, content) when there is no such block.
    """
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    # Join the split *lines* back together (the obfuscated source joined the
    # raw string character-by-character here — a bug).
    return None, "\n".join(full_content)


class DatasetMetadata(dict):
    """Dict view of a README's YAML metadata, with (de)serialization helpers."""

    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path: Path) -> "DatasetMetadata":
        """Load metadata from the YAML block of the README at ``path``."""
        with open(path, encoding="utf-8" ) as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path: Path) -> None:
        """Write this metadata into the README at ``path``, preserving its body."""
        if path.exists():
            with open(path, encoding="utf-8" ) as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8" ) as readme_file:
            readme_file.write(full_content)

    def _to_readme(self, readme_content: Optional[str] = None) -> str:
        # Replace any existing YAML block; keep the rest of the README intact.
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string: str) -> "DatasetMetadata":
        """Parse a YAML string (rejecting duplicate keys) into metadata."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader ) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_" ) if key.replace("-", "_" ) in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        """Serialize back to YAML, restoring dashed key names."""
        return yaml.safe_dump(
            {
                (key.replace("_", "-" ) if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8" )


# Known task categories (each mapping to its -- currently empty -- subtask list).
known_task_ids = {
    "image-classification": [],
    "translation": [],
    "image-segmentation": [],
    "fill-mask": [],
    "automatic-speech-recognition": [],
    "token-classification": [],
    "sentence-similarity": [],
    "audio-classification": [],
    "question-answering": [],
    "summarization": [],
    "zero-shot-classification": [],
    "table-to-text": [],
    "feature-extraction": [],
    "other": [],
    "multiple-choice": [],
    "text-classification": [],
    "text-to-image": [],
    "text2text-generation": [],
    "zero-shot-image-classification": [],
    "tabular-classification": [],
    "tabular-regression": [],
    "image-to-image": [],
    "tabular-to-text": [],
    "unconditional-image-generation": [],
    "text-retrieval": [],
    "text-to-speech": [],
    "object-detection": [],
    "audio-to-audio": [],
    "text-generation": [],
    "conversational": [],
    "table-question-answering": [],
    "visual-question-answering": [],
    "image-to-text": [],
    "reinforcement-learning": [],
    "voice-activity-detection": [],
    "time-series-forecasting": [],
    "document-question-answering": [],
}

if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
87
def perfect_cube(n: int) -> bool:
    """Return True if ``n`` is a perfect cube (the cube of some integer).

    Handles negatives too, since (-k)**3 == -(k**3).  The naive float test
    ``(n ** (1/3)) ** 3 == n`` fails for values such as 27 because of
    floating-point rounding, so we round the float root and verify the
    neighbourhood exactly in integer arithmetic.
    """
    m = abs(n)
    root = round(m ** (1 / 3))
    # The float root can be off by one for large inputs; check neighbours.
    return any(c >= 0 and c**3 == m for c in (root - 1, root, root + 1))


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
87
1
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    """Return True if ``next_ver`` can extend ``path`` at position ``curr_ind``."""
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    """Backtracking helper: try to fill ``path[curr_ind:]`` into a Hamiltonian cycle."""
    # Base Case: all vertices placed — the cycle closes iff an edge leads
    # back to the starting vertex.
    if curr_ind == len(graph):
        return graph[path[curr_ind - 1]][path[0]] == 1

    # Recursive Step: try each vertex as the next transition.
    for next_ver in range(len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            path[curr_ind] = next_ver
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    """Return a Hamiltonian cycle as a vertex list starting and ending at
    ``start_index``, or an empty list when the graph has none.

    ``graph`` is a square 0/1 adjacency matrix.
    """
    path = [-1] * (len(graph) + 1)
    # Both ends of the path are the starting vertex (it is a cycle).
    path[0] = path[-1] = start_index
    return path if util_hamilton_cycle(graph, path, 1) else []
87
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    """Return a copy of ``img`` with its contrast adjusted by ``level``.

    Uses the standard contrast-correction factor
    259 * (level + 255) / (255 * (259 - level)) and remaps every channel
    value around the midpoint 128.
    """
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        """Fundamental Transformation/Operation that'll be performed on every bit."""
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open('''image_data/lena.jpg''') as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save('''image_data/lena_high_contrast.png''', format='''png''')
87
1
"""Check (or regenerate) the dummy-object files for optional backends.

All paths are set with the intent you should run this script from the root
of the repo with the command ``python utils/check_dummies.py``.
"""
import argparse
import os
import re


PATH_TO_DIFFUSERS = "src/diffusers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
# Matches from xxx import bla
_re_single_line_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")

# Templates for the generated dummy objects.
DUMMY_CONSTANT = """
{0} = None
"""

DUMMY_CLASS = """
class {0}(metaclass=DummyObject):
    _backends = {1}

    def __init__(self, *args, **kwargs):
        requires_backends(self, {1})

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, {1})

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, {1})
"""

DUMMY_FUNCTION = """
def {0}(*args, **kwargs):
    requires_backends({0}, {1})
"""


def find_backend(line):
    """Return the backend name(s) a guard line checks, joined with `_and_`, or None."""
    backends = _re_backend.findall(line)
    if len(backends) == 0:
        return None
    return "_and_".join(backends)


def read_init():
    """Parse the main __init__ and map each backend to its guarded object names."""
    with open(os.path.join(PATH_TO_DIFFUSERS, "__init__.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Get to the point we do the actual imports for type checking
    line_index = 0
    backend_specific_objects = {}
    # Go through the end of the file
    while line_index < len(lines):
        # If the line contains is_backend_available, we grab all objects associated with the `else` block
        backend = find_backend(lines[line_index])
        if backend is not None:
            while not lines[line_index].startswith("else:"):
                line_index += 1
            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while line_index < len(lines) and len(lines[line_index]) > 1:
                line = lines[line_index]
                single_line_import_search = _re_single_line_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 8):
                    objects.append(line[8:-2])
                line_index += 1

            if len(objects) > 0:
                backend_specific_objects[backend] = objects
        else:
            line_index += 1

    return backend_specific_objects


def create_dummy_object(name, backend_name):
    """Render one dummy: constant for UPPER names, function for lower, class otherwise."""
    if name.isupper():
        return DUMMY_CONSTANT.format(name)
    elif name.islower():
        return DUMMY_FUNCTION.format(name, backend_name)
    else:
        return DUMMY_CLASS.format(name, backend_name)


def create_dummy_files(backend_specific_objects=None):
    """Build the full text of each dummy file, keyed by backend."""
    if backend_specific_objects is None:
        backend_specific_objects = read_init()
    # For special correspondence backend to module name as used in the function requires_modulename
    dummy_files = {}

    for backend, objects in backend_specific_objects.items():
        backend_name = "[" + ", ".join(f'"{b}"' for b in backend.split("_and_")) + "]"
        dummy_file = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n"
        dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
        dummy_file += "\n".join([create_dummy_object(o, backend_name) for o in objects])
        dummy_files[backend] = dummy_file

    return dummy_files


def check_dummies(overwrite=False):
    """Compare generated dummies against the checked-in files; fix or fail."""
    dummy_files = create_dummy_files()
    # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py
    short_names = {"torch": "pt"}

    # Locate actual dummy modules and read their content.
    path = os.path.join(PATH_TO_DIFFUSERS, "utils")
    dummy_file_paths = {
        backend: os.path.join(path, f"dummy_{short_names.get(backend, backend)}_objects.py")
        for backend in dummy_files.keys()
    }

    actual_dummies = {}
    for backend, file_path in dummy_file_paths.items():
        if os.path.isfile(file_path):
            with open(file_path, "r", encoding="utf-8", newline="\n") as f:
                actual_dummies[backend] = f.read()
        else:
            actual_dummies[backend] = ""

    for backend in dummy_files.keys():
        if dummy_files[backend] != actual_dummies[backend]:
            if overwrite:
                print(
                    f"Updating diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py as the main "
                    "__init__ has new objects."
                )
                with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n") as f:
                    f.write(dummy_files[backend])
            else:
                raise ValueError(
                    "The main __init__ has objects that are not present in "
                    f"diffusers.utils.dummy_{short_names.get(backend, backend)}_objects.py. Run `make fix-copies` "
                    "to fix this."
                )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_dummies(args.fix_and_overwrite)
87
from __future__ import annotations

import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    """LRU cache over hashable keys, backed by a deque (most recent at the left)."""

    dq_store: deque[T]  # Cache store of keys, most-recently-used first
    key_reference: set[T]  # References of the keys in cache, for O(1) membership
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        """Create a cache of capacity ``n`` (0 means effectively unbounded)."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Access key ``x``: insert it (evicting the LRU entry if full) or
        move it to the most-recently-used position."""
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                # Evict the least-recently-used key (rightmost).
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print cache contents, most-recently-used first."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[int | str] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
87
1
# CoVal coreference-evaluation metric wrapper for `datasets`.
#
# NOTE(review): identifiers in this module were mechanically renamed
# (functions -> `lowercase_`, locals -> `lowercase__`, classes -> `snake_case_`,
# string constants and the logger all -> `UpperCamelCase`), which corrupted it:
#  * every `def lowercase_(...)` repeats the parameter name `_lowerCamelCase`,
#    which is a SyntaxError in Python;
#  * bodies still reference the original names (`doc`, `key_lines`, `sys_lines`,
#    `NP_only`, `min_span`, `logger`, `_CITATION`, `_DESCRIPTION`,
#    `_KWARGS_DESCRIPTION`, `get_coref_infos`, `parse_flag...`) that no longer
#    exist — each would be a NameError;
#  * typing names (`List`, `Dict`, `Optional`, `Union`, `Tuple`, `Any`) are used
#    in annotations without any `typing` import;
#  * successive assignments to `UpperCamelCase` shadow one another, so the
#    logger and the three docstring constants all collapse into one name.
# Code below is kept token-identical; comments document the intended behaviour.

import coval  # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator

import datasets


# Originally the module logger (`logger`); immediately shadowed below.
UpperCamelCase = datasets.logging.get_logger(__name__)

# Originally `_CITATION`: BibTeX entries for the metrics CoVal implements.
UpperCamelCase = '''\
@InProceedings{moosavi2019minimum,
    author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
    title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
    year = {2019},
    booktitle = {Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)},
    publisher = {Association for Computational Linguistics},
    address = {Florence, Italy},
}

@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}

@INPROCEEDINGS{Bagga98algorithmsfor,
    author = {Amit Bagga and Breck Baldwin},
    title = {Algorithms for Scoring Coreference Chains},
    booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
    year = {1998},
    pages = {563--566}
}

@INPROCEEDINGS{Luo05oncoreference,
    author = {Xiaoqiang Luo},
    title = {On coreference resolution performance metrics},
    booktitle = {In Proc. of HLT/EMNLP},
    year = {2005},
    pages = {25--32},
    publisher = {URL}
}

@inproceedings{moosavi-strube-2016-coreference,
    title = "Which Coreference Evaluation Metric Do You Trust?
A Proposal for a Link-based Entity Aware Metric",
    author = "Moosavi, Nafise Sadat  and
      Strube, Michael",
    booktitle = "Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
    month = aug,
    year = "2016",
    address = "Berlin, Germany",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/P16-1060",
    doi = "10.18653/v1/P16-1060",
    pages = "632--642",
}
'''

# Originally `_DESCRIPTION`: prose description of the metric and the
# CoNLL line format it consumes.
UpperCamelCase = '''\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].

This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column  Type    Description
1   Document ID This is a variation on the document filename
2   Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3   Word number
4   Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5   Part-of-Speech
6   Parse bit   This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the "([pos] [word])" string (or leaf) and concatenating the items in the rows of that column.
7   Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a "-"
8   Predicate Frameset ID   This is the PropBank frameset ID of the predicate in Column 7.
9   Word sense  This is the word sense of the word in Column 3.
10  Speaker/Author  This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11  Named Entities  These columns identifies the spans representing various named entities.
12:N    Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N   Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section "*_conll File Format"): http://www.conll.cemantix.org/2012/data.html

Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md

CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
'''

# Originally `_KWARGS_DESCRIPTION`: argument/return documentation and a
# doctested usage example for `compute`.
UpperCamelCase = '''
Calculates coreference evaluation metrics.
Args:
    predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
        Each prediction is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
        See the details on the format in the description of the metric.
    references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
        Each reference is a word with its annotations as a string made of columns joined with spaces.
        Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
        See the details on the format in the description of the metric.
    keep_singletons: After extracting all mentions of key or system files,
        mentions whose corresponding coreference chain is of size one,
        are considered as singletons. The default evaluation mode will include
        singletons in evaluations if they are included in the key or the system files.
        By setting \'keep_singletons=False\', all singletons in the key and system files
        will be excluded from the evaluation.
    NP_only: Most of the recent coreference resolvers only resolve NP mentions and
        leave out the resolution of VPs. By setting the \'NP_only\' option, the scorer will only evaluate the resolution of NPs.
    min_span: By setting \'min_span\', the scorer reports the results based on automatically detected minimum spans.
        Minimum spans are determined using the MINA algorithm.

Returns:
    \'mentions\': mentions
    \'muc\': MUC metric [Vilain et al, 1995]
    \'bcub\': B-cubed [Bagga and Baldwin, 1998]
    \'ceafe\': CEAFe [Luo et al., 2005]
    \'lea\': LEA [Moosavi and Strube, 2016]
    \'conll_score\': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)

Examples:

    >>> coval = datasets.load_metric(\'coval\')
    >>> words = [\'bc/cctv/00/cctv_0005   0   0       Thank   VBP  (TOP(S(VP*    thank  01   1    Xu_li  *           (V*)        *       -\',
    ... \'bc/cctv/00/cctv_0005   0   1         you   PRP        (NP*)      -    -   -    Xu_li  *        (ARG1*)   (ARG0*)   (116)\',
    ... \'bc/cctv/00/cctv_0005   0   2    everyone    NN        (NP*)      -    -   -    Xu_li  *    (ARGM-DIS*)        *    (116)\',
    ... \'bc/cctv/00/cctv_0005   0   3         for    IN        (PP*       -    -   -    Xu_li  *         (ARG2*         *       -\',
    ... \'bc/cctv/00/cctv_0005   0   4    watching   VBG   (S(VP*))))   watch  01   1    Xu_li  *             *)      (V*)       -\',
    ... \'bc/cctv/00/cctv_0005   0   5           .     .          *))      -    -   -    Xu_li  *             *         *       -\']
    >>> references = [words]
    >>> predictions = [words]
    >>> results = coval.compute(predictions=predictions, references=references)
    >>> print(results) # doctest:+ELLIPSIS
    {\'mentions/recall\': 1.0,[...] \'conll_score\': 100.0}
'''


# Originally `get_coref_infos(key_lines, sys_lines, NP_only, remove_nested,
# keep_singletons, min_span, doc)`: wraps both line lists into single-document
# dicts, extracts mentions, optionally restricts to NPs / minimum spans,
# optionally strips nested mentions, and returns the per-document tuple the
# coval evaluator consumes.
# NOTE(review): the signature repeats `_lowerCamelCase` (SyntaxError) and the
# body references the pre-rename parameter/local names.
def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : int , _lowerCamelCase : Dict=False , _lowerCamelCase : List[Any]=False , _lowerCamelCase : Optional[int]=True , _lowerCamelCase : Union[str, Any]=False , _lowerCamelCase : Optional[int]="dummy_doc"):
    lowercase__ : Dict = {doc: key_lines}
    lowercase__ : Optional[int] = {doc: sys_lines}
    lowercase__ : Tuple = {}
    lowercase__ : Union[str, Any] = 0
    lowercase__ : str = 0
    lowercase__ : Tuple = 0
    lowercase__ : Optional[int] = 0
    lowercase__ : str = 0
    lowercase__ : str = 0
    # Key-side mentions (gold annotation).
    lowercase__ , lowercase__ : Union[str, Any] = reader.get_doc_mentions(_lowerCamelCase , key_doc_lines[doc] , _lowerCamelCase)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        lowercase__ : int = reader.set_annotated_parse_trees(_lowerCamelCase , key_doc_lines[doc] , _lowerCamelCase , _lowerCamelCase)
    # System-side mentions (predictions).
    lowercase__ , lowercase__ : Union[str, Any] = reader.get_doc_mentions(_lowerCamelCase , sys_doc_lines[doc] , _lowerCamelCase)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        lowercase__ : Optional[int] = reader.set_annotated_parse_trees(_lowerCamelCase , key_doc_lines[doc] , _lowerCamelCase , _lowerCamelCase)
    if remove_nested:
        lowercase__ , lowercase__ : List[str] = reader.remove_nested_coref_mentions(_lowerCamelCase , _lowerCamelCase)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        lowercase__ , lowercase__ : List[Any] = reader.remove_nested_coref_mentions(_lowerCamelCase , _lowerCamelCase)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    # Bidirectional mention->cluster assignments needed by the evaluator.
    lowercase__ : List[str] = reader.get_mention_assignments(_lowerCamelCase , _lowerCamelCase)
    lowercase__ : Tuple = reader.get_mention_assignments(_lowerCamelCase , _lowerCamelCase)
    lowercase__ : str = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
    if remove_nested:
        logger.info(
            "Number of removed nested coreferring mentions in the key "
            f'''annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}''')
        logger.info(
            "Number of resulting singleton clusters in the key "
            f'''annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}''')
    if not keep_singletons:
        logger.info(
            f'''{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system '''
            "files, respectively")
    return doc_coref_infos


# Originally `evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested,
# keep_singletons, min_span)`: runs every (name, metric) pair over the doc
# infos, accumulates recall/precision/F1 per metric, and adds the averaged
# CoNLL score when exactly the muc/bcub/ceafe trio was evaluated.
def lowercase_ ( _lowerCamelCase : List[str] , _lowerCamelCase : str , _lowerCamelCase : List[str] , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple):
    lowercase__ : Tuple = get_coref_infos(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase)
    lowercase__ : List[Any] = {}
    lowercase__ : List[str] = 0
    lowercase__ : List[Any] = 0
    for name, metric in metrics:
        lowercase__ , lowercase__ , lowercase__ : Dict = evaluator.evaluate_documents(_lowerCamelCase , _lowerCamelCase , beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            # The CoNLL score is the mean F1 of exactly these three metrics.
            conll += fa
            conll_subparts_num += 1
        output_scores.update({f'''{name}/recall''': recall, f'''{name}/precision''': precision, f'''{name}/f1''': fa})
        logger.info(
            name.ljust(10) ,
            f'''Recall: {recall * 100:.2f}''' ,
            f''' Precision: {precision * 100:.2f}''' ,
            f''' F1: {fa * 100:.2f}''' ,
        )
    if conll_subparts_num == 3:
        lowercase__ : str = (conll / 3) * 100
        logger.info(f'''CoNLL score: {conll:.2f}''')
        output_scores.update({"conll_score": conll})
    return output_scores


# Originally `check_gold_parse_annotation(key_lines)`: scans non-comment CoNLL
# lines and returns True as soon as column 6 (index 5) holds a real parse bit
# (anything but "-"); bails out early on a malformed/short line.
def lowercase_ ( _lowerCamelCase : Dict):
    lowercase__ : List[str] = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                lowercase__ : Union[str, Any] = line.split()[5]
                if not parse_col == "-":
                    lowercase__ : str = True
                    break
            else:
                break
    return has_gold_parse


# Originally the `Coval(datasets.Metric)` class: declares the predictions /
# references feature schema and dispatches `compute` to `evaluate` above.
# NOTE(review): both methods are named `__UpperCamelCase`, so the second
# definition (`_compute`) shadows the first (`_info`).
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class snake_case_ ( datasets.Metric ):
    # Originally `_info`: metric metadata and input feature types.
    def __UpperCamelCase ( self : Optional[int] ) -> Optional[int]:
        return datasets.MetricInfo(
            description=_DESCRIPTION ,
            citation=_CITATION ,
            inputs_description=_KWARGS_DESCRIPTION ,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("string" ) ),
                    "references": datasets.Sequence(datasets.Value("string" ) ),
                } ) ,
            codebase_urls=["https://github.com/ns-moosavi/coval"] ,
            reference_urls=[
                "https://github.com/ns-moosavi/coval",
                "https://www.aclweb.org/anthology/P16-1060",
                "http://www.conll.cemantix.org/2012/data.html",
            ] ,
        )

    # Originally `_compute(predictions, references, keep_singletons, NP_only,
    # min_span, remove_nested)`: validates gold-parse availability when
    # `min_span` is requested, then delegates to `evaluate`.
    def __UpperCamelCase ( self : Tuple , lowercase_ : Tuple , lowercase_ : Any , lowercase_ : Optional[Any]=True , lowercase_ : Optional[Any]=False , lowercase_ : Tuple=False , lowercase_ : Any=False ) -> Dict:
        lowercase__ : int = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]
        if min_span:
            lowercase__ : Optional[int] = util.check_gold_parse_annotation(lowercase_)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'." )
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        lowercase__ : Any = evaluate(
            key_lines=lowercase_ ,
            sys_lines=lowercase_ ,
            metrics=lowercase_ ,
            NP_only=lowercase_ ,
            remove_nested=lowercase_ ,
            keep_singletons=lowercase_ ,
            min_span=lowercase_ ,
        )
        return score
87
# ConvBERT configuration module (config class + ONNX export config).
#
# NOTE(review): obfuscation damage — both classes inherit from the undefined
# name `__A` (originally `PretrainedConfig` and `OnnxConfig` respectively);
# the second `snake_case_` definition shadows the first at module level; the
# `model_type` class attribute was renamed to `__A`; and successive
# `UpperCamelCase` assignments (logger, pretrained-config map) shadow each
# other. Code is kept token-identical; comments document intent.

from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


# Originally the module logger; shadowed by the next assignment.
UpperCamelCase = logging.get_logger(__name__)

# Originally `CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP`: model id -> config URL.
UpperCamelCase = {
    '''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
    '''YituTech/conv-bert-medium-small''': (
        '''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
    ),
    '''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


# Originally `ConvBertConfig`: stores the ConvBERT architecture
# hyper-parameters. Parameters (in positional order): vocab_size,
# hidden_size, num_hidden_layers, num_attention_heads, intermediate_size,
# hidden_act, hidden_dropout_prob, attention_probs_dropout_prob,
# max_position_embeddings, type_vocab_size, initializer_range,
# layer_norm_eps, pad_token_id, bos_token_id, eos_token_id, embedding_size,
# head_ratio, conv_kernel_size, num_groups, classifier_dropout.
class snake_case_ ( __A ):
    # Originally `model_type`.
    __A : List[str] = "convbert"

    def __init__( self : Union[str, Any] , lowercase_ : str=3_05_22 , lowercase_ : Any=7_68 , lowercase_ : Tuple=12 , lowercase_ : List[str]=12 , lowercase_ : Optional[int]=30_72 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : str=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : Optional[Any]=5_12 , lowercase_ : Dict=2 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : Optional[Any]=1E-12 , lowercase_ : Optional[int]=1 , lowercase_ : List[Any]=0 , lowercase_ : Optional[int]=2 , lowercase_ : str=7_68 , lowercase_ : Dict=2 , lowercase_ : Optional[Any]=9 , lowercase_ : Union[str, Any]=1 , lowercase_ : Any=None , **lowercase_ : Optional[Any] , ) -> Dict:
        # Special-token ids are forwarded to the base config.
        super().__init__(
            pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ , )
        # Plain attribute assignments, one per hyper-parameter (names listed above).
        lowercase__ : List[str] = vocab_size
        lowercase__ : Union[str, Any] = hidden_size
        lowercase__ : Any = num_hidden_layers
        lowercase__ : List[str] = num_attention_heads
        lowercase__ : Union[str, Any] = intermediate_size
        lowercase__ : Optional[Any] = hidden_act
        lowercase__ : int = hidden_dropout_prob
        lowercase__ : str = attention_probs_dropout_prob
        lowercase__ : Union[str, Any] = max_position_embeddings
        lowercase__ : Optional[int] = type_vocab_size
        lowercase__ : Tuple = initializer_range
        lowercase__ : List[str] = layer_norm_eps
        lowercase__ : List[Any] = embedding_size
        lowercase__ : Optional[Any] = head_ratio
        lowercase__ : Dict = conv_kernel_size
        lowercase__ : Tuple = num_groups
        lowercase__ : Optional[int] = classifier_dropout


# Originally `ConvBertOnnxConfig`: declares the dynamic axes of the ONNX
# export inputs, depending on the task.
class snake_case_ ( __A ):
    @property
    def __UpperCamelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            lowercase__ : Tuple = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            lowercase__ : str = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ] )
87
1
# UperNet semantic-segmentation model (backbone-agnostic), originally
# transformers' `modeling_upernet.py`.
#
# NOTE(review): obfuscation damage throughout — kept token-identical:
#  * `nn.Conv2d` -> `nn.Convad`, `nn.BatchNorm2d` -> `nn.BatchNormad`,
#    `nn.AdaptiveAvgPool2d` -> `nn.AdaptiveAvgPoolad` (undefined torch.nn
#    attributes — AttributeError at construction);
#  * every `__init__`/`forward` signature repeats the parameter name
#    `lowercase_` (SyntaxError) while bodies reference the original names
#    (`config`, `in_channels`, `pixel_values`, `labels`, ...);
#  * all classes are named `snake_case_` (later defs shadow earlier ones) and
#    all methods `__UpperCamelCase`, yet bodies call the real names
#    (`UperNetConvModule`, `UperNetPyramidPoolingBlock`, `self.psp_forward`);
#  * the docstring constants and the model list all collapse onto
#    `UpperCamelCase`, and the PreTrainedModel base became the undefined `__A`.

from typing import List, Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import CrossEntropyLoss

from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig


# Originally `UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST`.
UpperCamelCase = [
    '''openmmlab/upernet-convnext-tiny''',
    # See all UperNet models at https://huggingface.co/models?filter=upernet
]

# General docstring
# Originally `_CONFIG_FOR_DOC`; shadows the list above.
UpperCamelCase = '''UperNetConfig'''


# Originally `UperNetConvModule`: Conv2d -> BatchNorm2d -> ReLU block.
# Intended parameters: in_channels, out_channels, kernel_size, padding,
# bias, dilation.
class snake_case_ ( nn.Module ):
    def __init__( self : int , lowercase_ : int , lowercase_ : int , lowercase_ : Union[int, Tuple[int, int]] , lowercase_ : Union[int, Tuple[int, int], str] = 0 , lowercase_ : bool = False , lowercase_ : Union[int, Tuple[int, int]] = 1 , ) -> None:
        super().__init__()
        lowercase__ : str = nn.Convad(
            in_channels=lowercase_ , out_channels=lowercase_ , kernel_size=lowercase_ , padding=lowercase_ , bias=lowercase_ , dilation=lowercase_ , )
        lowercase__ : Dict = nn.BatchNormad(lowercase_ )
        lowercase__ : Any = nn.ReLU()

    # Originally `forward`: conv -> batch-norm -> ReLU.
    def __UpperCamelCase ( self : List[Any] , lowercase_ : torch.Tensor ) -> torch.Tensor:
        lowercase__ : Optional[Any] = self.conv(lowercase_ )
        lowercase__ : List[Any] = self.batch_norm(lowercase_ )
        lowercase__ : Optional[int] = self.activation(lowercase_ )
        return output


# Originally `UperNetPyramidPoolingBlock`: adaptive average pool at one scale
# followed by a 1x1 conv module.
class snake_case_ ( nn.Module ):
    def __init__( self : Dict , lowercase_ : int , lowercase_ : int , lowercase_ : int ) -> None:
        super().__init__()
        lowercase__ : List[Any] = [
            nn.AdaptiveAvgPoolad(lowercase_ ),
            UperNetConvModule(lowercase_ , lowercase_ , kernel_size=1 ),
        ]
        for i, layer in enumerate(self.layers ):
            self.add_module(str(lowercase_ ) , lowercase_ )

    def __UpperCamelCase ( self : Dict , lowercase_ : torch.Tensor ) -> torch.Tensor:
        lowercase__ : Any = input
        for layer in self.layers:
            lowercase__ : Tuple = layer(lowercase_ )
        return hidden_state


# Originally `UperNetPyramidPoolingModule` (PSP): one pooling block per scale,
# each output upsampled back to the input spatial size.
class snake_case_ ( nn.Module ):
    def __init__( self : Tuple , lowercase_ : Tuple[int, ...] , lowercase_ : int , lowercase_ : int , lowercase_ : bool ) -> None:
        super().__init__()
        lowercase__ : List[Any] = pool_scales
        lowercase__ : int = align_corners
        lowercase__ : str = in_channels
        lowercase__ : Optional[Any] = channels
        lowercase__ : Optional[int] = []
        for i, pool_scale in enumerate(lowercase_ ):
            lowercase__ : Dict = UperNetPyramidPoolingBlock(pool_scale=lowercase_ , in_channels=lowercase_ , channels=lowercase_ )
            self.blocks.append(lowercase_ )
            self.add_module(str(lowercase_ ) , lowercase_ )

    def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : torch.Tensor ) -> List[torch.Tensor]:
        lowercase__ : List[str] = []
        for ppm in self.blocks:
            lowercase__ : Optional[int] = ppm(lowercase_ )
            # Upsample each pooled map back to the input's H x W.
            lowercase__ : Optional[Any] = nn.functional.interpolate(
                lowercase_ , size=x.size()[2:] , mode="bilinear" , align_corners=self.align_corners )
            ppm_outs.append(lowercase_ )
        return ppm_outs


# Originally `UperNetHead`: PSP module on the deepest feature map plus an
# FPN over the shallower maps, fused and projected to `num_labels` classes.
class snake_case_ ( nn.Module ):
    def __init__( self : List[Any] , lowercase_ : List[str] , lowercase_ : Optional[Any] ) -> Dict:
        super().__init__()
        lowercase__ : Any = config
        lowercase__ : Tuple = config.pool_scales  # e.g. (1, 2, 3, 6)
        lowercase__ : List[Any] = in_channels
        lowercase__ : Union[str, Any] = config.hidden_size
        lowercase__ : List[str] = False
        lowercase__ : int = nn.Convad(self.channels , config.num_labels , kernel_size=1 )
        # PSP Module
        lowercase__ : int = UperNetPyramidPoolingModule(
            self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
        lowercase__ : str = UperNetConvModule(
            self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
        # FPN Module
        lowercase__ : str = nn.ModuleList()
        lowercase__ : Optional[int] = nn.ModuleList()
        for in_channels in self.in_channels[:-1]:  # skip the top layer
            lowercase__ : List[Any] = UperNetConvModule(lowercase_ , self.channels , kernel_size=1 )
            lowercase__ : Optional[Any] = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
            self.lateral_convs.append(lowercase_ )
            self.fpn_convs.append(lowercase_ )
        lowercase__ : List[Any] = UperNetConvModule(
            len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )

    # Originally `init_weights`.
    def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
        self.apply(self._init_weights )

    # Originally `_init_weights`: normal init for Conv2d weights, zero bias.
    def __UpperCamelCase ( self : str , lowercase_ : Dict ) -> str:
        if isinstance(lowercase_ , nn.Convad ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()

    # Originally `psp_forward`: PSP over the deepest map, concat, bottleneck.
    def __UpperCamelCase ( self : str , lowercase_ : Union[str, Any] ) -> int:
        lowercase__ : Optional[Any] = inputs[-1]
        lowercase__ : Optional[int] = [x]
        psp_outs.extend(self.psp_modules(lowercase_ ) )
        lowercase__ : Any = torch.cat(lowercase_ , dim=1 )
        lowercase__ : str = self.bottleneck(lowercase_ )
        return output

    # Originally `forward`: laterals -> top-down fusion -> per-level convs ->
    # concat at the finest resolution -> bottleneck -> classifier.
    def __UpperCamelCase ( self : str , lowercase_ : torch.Tensor ) -> torch.Tensor:
        # build laterals
        lowercase__ : Optional[Any] = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
        laterals.append(self.psp_forward(lowercase_ ) )
        # build top-down path
        lowercase__ : List[str] = len(lowercase_ )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            lowercase__ : List[str] = laterals[i - 1].shape[2:]
            lowercase__ : Dict = laterals[i - 1] + nn.functional.interpolate(
                laterals[i] , size=lowercase_ , mode="bilinear" , align_corners=self.align_corners )
        # build outputs
        lowercase__ : List[Any] = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
        # append psp feature
        fpn_outs.append(laterals[-1] )
        for i in range(used_backbone_levels - 1 , 0 , -1 ):
            lowercase__ : str = nn.functional.interpolate(
                fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode="bilinear" , align_corners=self.align_corners )
        lowercase__ : str = torch.cat(lowercase_ , dim=1 )
        lowercase__ : Dict = self.fpn_bottleneck(lowercase_ )
        lowercase__ : int = self.classifier(lowercase_ )
        return output


# Originally `UperNetFCNHead`: auxiliary FCN head applied to one backbone
# feature map (index `in_index`), used only for the auxiliary loss.
class snake_case_ ( nn.Module ):
    def __init__( self : Union[str, Any] , lowercase_ : Optional[Any] , lowercase_ : int = 2 , lowercase_ : int = 3 , lowercase_ : Union[int, Tuple[int, int]] = 1 ) -> None:
        super().__init__()
        lowercase__ : Dict = config
        lowercase__ : Optional[Any] = config.auxiliary_in_channels
        lowercase__ : int = config.auxiliary_channels
        lowercase__ : List[str] = config.auxiliary_num_convs
        lowercase__ : Dict = config.auxiliary_concat_input
        lowercase__ : Any = in_index
        lowercase__ : List[Any] = (kernel_size // 2) * dilation
        lowercase__ : Union[str, Any] = []
        convs.append(
            UperNetConvModule(
                self.in_channels , self.channels , kernel_size=lowercase_ , padding=lowercase_ , dilation=lowercase_ ) )
        for i in range(self.num_convs - 1 ):
            convs.append(
                UperNetConvModule(
                    self.channels , self.channels , kernel_size=lowercase_ , padding=lowercase_ , dilation=lowercase_ ) )
        if self.num_convs == 0:
            lowercase__ : Optional[Any] = nn.Identity()
        else:
            lowercase__ : Union[str, Any] = nn.Sequential(*lowercase_ )
        if self.concat_input:
            lowercase__ : Optional[Any] = UperNetConvModule(
                self.in_channels + self.channels , self.channels , kernel_size=lowercase_ , padding=kernel_size // 2 )
        lowercase__ : Any = nn.Convad(self.channels , config.num_labels , kernel_size=1 )

    # Originally `init_weights`.
    def __UpperCamelCase ( self : int ) -> str:
        self.apply(self._init_weights )

    # Originally `_init_weights`.
    def __UpperCamelCase ( self : List[Any] , lowercase_ : str ) -> Any:
        if isinstance(lowercase_ , nn.Convad ):
            module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
            if module.bias is not None:
                module.bias.data.zero_()

    # Originally `forward`.
    def __UpperCamelCase ( self : Optional[int] , lowercase_ : torch.Tensor ) -> torch.Tensor:
        # just take the relevant feature maps
        lowercase__ : Tuple = encoder_hidden_states[self.in_index]
        lowercase__ : Tuple = self.convs(lowercase_ )
        if self.concat_input:
            lowercase__ : List[str] = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
        lowercase__ : int = self.classifier(lowercase_ )
        return output


# Originally `UperNetPreTrainedModel(PreTrainedModel)`: weight-init plumbing
# and gradient-checkpointing toggle. The `__A` class attributes were
# `config_class`, `main_input_name`, `supports_gradient_checkpointing`.
class snake_case_ ( __A ):
    __A : int = UperNetConfig
    __A : Union[str, Any] = "pixel_values"
    __A : Tuple = True

    # Originally `_init_weights`.
    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : Union[str, Any] ) -> Any:
        if isinstance(lowercase_ , lowercase_ ):
            module.backbone.init_weights()
            module.decode_head.init_weights()
            module.auxiliary_head.init_weights()

    # Originally `init_weights`.
    def __UpperCamelCase ( self : Tuple ) -> Optional[Any]:
        self.backbone.init_weights()
        self.decode_head.init_weights()
        self.auxiliary_head.init_weights()

    # Originally `_set_gradient_checkpointing`.
    def __UpperCamelCase ( self : int , lowercase_ : int , lowercase_ : List[str]=False ) -> Any:
        if isinstance(lowercase_ , lowercase_ ):
            lowercase__ : Union[str, Any] = value


# Originally `UPERNET_START_DOCSTRING`; shadowed by the next assignment.
UpperCamelCase = R'''
    Parameters:
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.
        config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
'''

# Originally `UPERNET_INPUTS_DOCSTRING`.
UpperCamelCase = R'''
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
            [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See
            `attentions` under returned tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under
            returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
'''


# Originally `UperNetForSemanticSegmentation`: backbone + UperNet decode head
# (+ optional auxiliary FCN head), producing per-pixel class logits.
@add_start_docstrings(
    "UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes." , __A , )
class snake_case_ ( __A ):
    def __init__( self : str , lowercase_ : Union[str, Any] ) -> List[Any]:
        super().__init__(lowercase_ )
        lowercase__ : str = AutoBackbone.from_config(config.backbone_config )
        # Semantic segmentation head(s)
        lowercase__ : Any = UperNetHead(lowercase_ , in_channels=self.backbone.channels )
        lowercase__ : List[str] = UperNetFCNHead(lowercase_ ) if config.use_auxiliary_head else None
        # Initialize weights and apply final processing
        self.post_init()

    # Originally `forward(pixel_values, output_attentions,
    # output_hidden_states, labels, return_dict)`.
    @add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format("batch_size, sequence_length" ) )
    @replace_return_docstrings(output_type=lowercase_ , config_class=_CONFIG_FOR_DOC )
    def __UpperCamelCase ( self : Dict , lowercase_ : Optional[torch.Tensor] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[bool] = None , lowercase_ : Optional[torch.Tensor] = None , lowercase_ : Optional[bool] = None , ) -> Union[tuple, SemanticSegmenterOutput]:
        lowercase__ : Optional[int] = return_dict if return_dict is not None else self.config.use_return_dict
        lowercase__ : Tuple = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        lowercase__ : Dict = output_attentions if output_attentions is not None else self.config.output_attentions
        # Backbone feature maps feed the decode head; logits are upsampled
        # back to the input resolution.
        lowercase__ : str = self.backbone.forward_with_filtered_kwargs(
            lowercase_ , output_hidden_states=lowercase_ , output_attentions=lowercase_ )
        lowercase__ : Union[str, Any] = outputs.feature_maps
        lowercase__ : Tuple = self.decode_head(lowercase_ )
        lowercase__ : Optional[Any] = nn.functional.interpolate(lowercase_ , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=lowercase_ )
        lowercase__ : Dict = None
        if self.auxiliary_head is not None:
            lowercase__ : Tuple = self.auxiliary_head(lowercase_ )
            lowercase__ : int = nn.functional.interpolate(
                lowercase_ , size=pixel_values.shape[2:] , mode="bilinear" , align_corners=lowercase_ )
        lowercase__ : Optional[Any] = None
        if labels is not None:
            if self.config.num_labels == 1:
                raise ValueError("The number of labels should be greater than one" )
            else:
                # compute weighted loss
                lowercase__ : Dict = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
                lowercase__ : int = loss_fct(lowercase_ , lowercase_ )
                lowercase__ : int = loss_fct(lowercase_ , lowercase_ )
                lowercase__ : str = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
        if not return_dict:
            if output_hidden_states:
                lowercase__ : Any = (logits,) + outputs[1:]
            else:
                lowercase__ : Optional[Any] = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return SemanticSegmenterOutput(
            loss=lowercase_ , logits=lowercase_ , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
87
import argparse

import torch

from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path: str, bert_config_file: str, pytorch_dump_path: str) -> None:
    """Convert a TensorFlow BERT checkpoint into a PyTorch ``BertForPreTraining`` state dict.

    Args:
        tf_checkpoint_path: Path to the TensorFlow checkpoint.
        bert_config_file: JSON config file describing the model architecture.
        pytorch_dump_path: Destination path for the serialized ``state_dict``.
    """
    # Initialise PyTorch model from the JSON architecture description.
    config = BertConfig.from_json_file(bert_config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    model = BertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)

    # Save pytorch-model (weights only; the config is not written here).
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    torch.save(model.state_dict(), pytorch_dump_path)


# Backward-compatible alias for the obfuscated name this function carried.
lowercase_ = convert_tf_checkpoint_to_pytorch


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
    )
    parser.add_argument(
        '''--bert_config_file''',
        default=None,
        type=str,
        required=True,
        help=(
            '''The config json file corresponding to the pre-trained BERT model. \n'''
            '''This specifies the model architecture.'''
        ),
    )
    parser.add_argument(
        '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
87
1
import requests

# Base endpoint for NewsAPI's BBC top-headlines feed; the caller's API key
# is appended to complete the URL.
_NEWS_API = '''https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey='''


def fetch_bbc_news(bbc_news_api_key: str) -> None:
    """Print the current top BBC News headlines, numbered from 1.

    Args:
        bbc_news_api_key: API key for https://newsapi.org.
    """
    # fetching a list of articles in json format
    bbc_news_page = requests.get(_NEWS_API + bbc_news_api_key).json()
    # each article in the list is a dict
    for i, article in enumerate(bbc_news_page["articles"], 1):
        print(f'''{i}.) {article["title"]}''')


# Backward-compatible alias for the obfuscated name this function carried.
lowercase_ = fetch_bbc_news


if __name__ == "__main__":
    fetch_bbc_news(bbc_news_api_key='''<Your BBC News API key goes here>''')
87
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch

import pyarrow as pa
import pytest
import requests
from packaging import version

from datasets import config


if config.PY_VERSION < version.parse("3.8"):
    import importlib_metadata
else:
    import importlib.metadata as importlib_metadata


def parse_flag_from_env(key, default=False):
    """Read boolean flag *key* from the environment; fall back to *default*.

    Raises:
        ValueError: if the variable is set but is not a recognised yes/no value.
    """
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value


# Test-suite gating flags, controlled through environment variables.
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)

# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")

# Audio
require_sndfile = pytest.mark.skipif(
    # On Windows and OS X, soundfile installs sndfile
    find_spec("soundfile") is None
    or version.parse(importlib_metadata.version("soundfile")) < version.parse("0.12.0"),
    reason="test requires sndfile>=0.12.1: 'pip install \"soundfile>=0.12.1\"'; ",
)

# Beam
require_beam = pytest.mark.skipif(
    not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse("0.3.2"),
    reason="test requires apache-beam and a compatible dill version",
)

# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
    config.DILL_VERSION <= version.parse("0.3.2"),
    reason="test requires dill>0.3.2 for cloudpickle compatibility",
)

# Windows
require_not_windows = pytest.mark.skipif(
    sys.platform == "win32",
    reason="test should not be run on Windows",
)


def require_faiss(test_case):
    """Skip *test_case* unless faiss is importable."""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    """Skip *test_case* unless regex is importable."""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    """Skip *test_case* unless elasticsearch is importable."""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    """Skip *test_case* unless sqlalchemy is importable."""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case


def require_torch(test_case):
    """Skip *test_case* unless PyTorch is available."""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    """Skip *test_case* unless TensorFlow is available."""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    """Skip *test_case* unless JAX is available."""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    """Skip *test_case* unless Pillow is available."""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case


def require_transformers(test_case):
    """Skip *test_case* unless transformers is importable."""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    """Skip *test_case* unless tiktoken is importable."""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    """Skip *test_case* unless spacy is importable."""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    """Skip the decorated test unless spacy and the named spacy *model* are available."""

    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model


def require_pyspark(test_case):
    """Skip *test_case* unless pyspark is importable."""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    """Skip *test_case* unless joblibspark is importable."""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case


def slow(test_case):
    """Skip *test_case* unless RUN_SLOW is enabled."""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    """Skip *test_case* unless RUN_LOCAL is enabled."""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    """Skip *test_case* unless RUN_PACKAGED is enabled."""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    """Skip *test_case* unless RUN_REMOTE is enabled."""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case


def for_all_test_methods(*decorators):
    """Class decorator: apply every decorator in *decorators* to each ``test*`` method."""

    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate


class RequestWouldHangIndefinitelyError(requests.ConnectionError):
    """Raised when a request is attempted in offline mode without a timeout set."""

    pass


class OfflineSimulationMode(Enum):
    # How `offline()` simulates the absence of a network connection.
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2


@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    """Simulate being offline for the duration of the context.

    Args:
        mode: Which failure mode to simulate (see ``OfflineSimulationMode``).
        timeout: Timeout (seconds) injected into requests when simulating a
            connection that hangs; kept tiny so tests fail fast.
    """
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")


@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    """Run the context body inside a fresh temporary working directory, restoring cwd after."""
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)


@contextmanager
def assert_arrow_memory_increases():
    """Assert that the context body allocates Arrow memory."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    """Assert that the context body does not leak Arrow memory."""
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    """Return True if the two numpy Generators would produce the same next draws.

    Deep copies are compared so neither generator's state is advanced.
    """
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()


def xfail_if_500_502_http_error(func):
    """Decorator: turn HTTP 500/502 errors from *func* into pytest xfails; re-raise anything else."""
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            # Flaky server-side errors shouldn't fail the suite.
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)


class _RunOutput:
    """Result of an async subprocess run: return code plus captured stdout/stderr lines."""

    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr


async def _read_stream(stream, callback):
    """Feed each line of *stream* to *callback* until EOF."""
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break


async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    """Run *cmd* asynchronously, teeing its stdout/stderr to ours while capturing them."""
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda line: tee(line, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda line: tee(line, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)


def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    """Run *cmd* to completion, raising RuntimeError on failure or on empty output.

    Raises:
        RuntimeError: if the command exits non-zero, or produces no output at all
            (tests may rely on the remote side doing the actual testing).
    """
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )

    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")

    return result


def pytest_xdist_worker_id():
    """Return the numeric id of the current pytest-xdist worker (0 when not under xdist)."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)


def get_torch_dist_unique_port():
    """Return a torch.distributed port unique to this xdist worker, so parallel workers don't collide."""
    port = 29500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
87
1
from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any=None , _lowerCamelCase : Optional[int]=None): if attention_mask is None: lowercase__ : List[str] = tf.cast(tf.math.not_equal(_lowerCamelCase , config.pad_token_id) , tf.inta) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class snake_case_ : __A : Union[str, Any] = OPTConfig __A : List[str] = {} __A : str = "gelu" def __init__( self : Any , lowercase_ : Optional[int] , lowercase_ : Optional[Any]=13 , lowercase_ : List[str]=7 , lowercase_ : Tuple=True , lowercase_ : Dict=False , lowercase_ : Dict=99 , lowercase_ : int=16 , lowercase_ : Any=2 , lowercase_ : int=4 , lowercase_ : Optional[int]=4 , lowercase_ : Optional[Any]="gelu" , lowercase_ : List[str]=0.1 , lowercase_ : Tuple=0.1 , lowercase_ : Tuple=20 , lowercase_ : List[str]=2 , lowercase_ : Union[str, Any]=1 , lowercase_ : Optional[Any]=0 , lowercase_ : Union[str, Any]=16 , lowercase_ : Dict=16 , ) -> Any: lowercase__ : Optional[int] = parent lowercase__ : Optional[Any] = batch_size lowercase__ : Dict = seq_length lowercase__ : Optional[int] = is_training lowercase__ : Union[str, Any] = use_labels lowercase__ : Tuple = vocab_size lowercase__ : Dict = hidden_size lowercase__ : Dict = num_hidden_layers lowercase__ : Union[str, Any] = num_attention_heads lowercase__ : List[str] = intermediate_size lowercase__ : List[str] = hidden_act lowercase__ : List[str] = hidden_dropout_prob lowercase__ : int = 
attention_probs_dropout_prob lowercase__ : Dict = max_position_embeddings lowercase__ : Optional[int] = eos_token_id lowercase__ : Optional[int] = pad_token_id lowercase__ : Dict = bos_token_id lowercase__ : List[str] = embed_dim lowercase__ : List[str] = word_embed_proj_dim lowercase__ : List[str] = False def __UpperCamelCase ( self : str ) -> List[Any]: lowercase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ) lowercase__ : List[Any] = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 ) lowercase__ : int = tf.concat([input_ids, eos_tensor] , axis=1 ) lowercase__ : Any = self.config_cls( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=lowercase_ , **self.config_updates , ) lowercase__ : Union[str, Any] = prepare_opt_inputs_dict(lowercase_ , lowercase_ ) return config, inputs_dict def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : int , lowercase_ : Optional[Any] ) -> Tuple: lowercase__ : Tuple = TFOPTModel(config=lowercase_ ) lowercase__ : Optional[Any] = inputs_dict["input_ids"] lowercase__ : Optional[int] = input_ids[:1, :] lowercase__ : int = inputs_dict["attention_mask"][:1, :] lowercase__ : List[Any] = 1 # first forward pass lowercase__ : Any = model(lowercase_ , attention_mask=lowercase_ , use_cache=lowercase_ ) lowercase__ , lowercase__ : Tuple = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids lowercase__ : Dict = ids_tensor((self.batch_size, 3) , config.vocab_size ) lowercase__ : str = 
tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta ) # append to next input_ids and lowercase__ : Tuple = tf.concat([input_ids, next_tokens] , axis=-1 ) lowercase__ : Dict = tf.concat([attention_mask, next_attn_mask] , axis=-1 ) lowercase__ : Union[str, Any] = model(lowercase_ , attention_mask=lowercase_ )[0] lowercase__ : Optional[Any] = model(lowercase_ , attention_mask=lowercase_ , past_key_values=lowercase_ )[0] self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] ) # select random slice lowercase__ : Any = int(ids_tensor((1,) , output_from_past.shape[-1] ) ) lowercase__ : Dict = output_from_no_past[:, -3:, random_slice_idx] lowercase__ : Any = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(lowercase_ , lowercase_ , rtol=1E-3 ) @require_tf class snake_case_ ( __A ,__A ,unittest.TestCase ): __A : Any = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () __A : str = (TFOPTForCausalLM,) if is_tf_available() else () __A : Tuple = ( {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {} ) __A : Tuple = False __A : Union[str, Any] = False __A : int = False __A : Dict = 10 def __UpperCamelCase ( self : Dict ) -> List[Any]: lowercase__ : List[str] = TFOPTModelTester(self ) lowercase__ : Tuple = ConfigTester(self , config_class=lowercase_ ) def __UpperCamelCase ( self : Tuple ) -> Any: self.config_tester.run_common_tests() def __UpperCamelCase ( self : Union[str, Any] ) -> List[str]: lowercase__ : str = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*lowercase_ ) def __UpperCamelCase ( self : Optional[Any] ) -> Tuple: lowercase__ , lowercase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(lowercase_ : Tuple , lowercase_ : str ): if hasattr(lowercase_ , "weight" ): return embedding_layer.weight else: # Here we build 
the word embeddings weights if not exists. # And then we retry to get the attribute once built. model.build() if hasattr(lowercase_ , "weight" ): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10]: # build the embeddings lowercase__ : List[str] = model_class(config=lowercase_ ) lowercase__ : List[Any] = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() ) lowercase__ : List[Any] = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() ) # reshape the embeddings model.resize_token_embeddings(lowercase_ ) lowercase__ : Optional[Any] = _get_word_embedding_weight(lowercase_ , model.get_input_embeddings() ) lowercase__ : Tuple = _get_word_embedding_weight(lowercase_ , model.get_output_embeddings() ) # check that the resized embeddings size matches the desired size. lowercase__ : Dict = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0] , lowercase_ ) # check that weights remain the same after resizing lowercase__ : List[Any] = True for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: lowercase__ : List[Any] = False self.assertTrue(lowercase_ ) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0] , lowercase_ ) lowercase__ : Dict = True for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: lowercase__ : List[str] = False self.assertTrue(lowercase_ ) def lowercase_ ( _lowerCamelCase : int): return tf.constant(_lowerCamelCase , dtype=tf.intaa) @require_tf class snake_case_ ( unittest.TestCase ): __A : str = 99 def __UpperCamelCase ( self : str ) -> Dict: lowercase__ : Any = tf.ones((4, 1) , dtype=tf.intaa ) * 2 lowercase__ : Optional[Any] = tf.concat([ids_tensor((4, 6) , 
self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 ) lowercase__ : int = input_ids.shape[0] lowercase__ : Any = OPTConfig( vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ) return config, input_ids, batch_size @require_sentencepiece @require_tf class snake_case_ ( unittest.TestCase ): @slow def __UpperCamelCase ( self : str ) -> List[str]: lowercase__ : Dict = TFOPTModel.from_pretrained("facebook/opt-350m" ) lowercase__ : Dict = _long_tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]] ) lowercase__ : Tuple = tf.not_equal(lowercase_ , model.config.pad_token_id ) with tf.GradientTape(): lowercase__ : Union[str, Any] = model(input_ids=lowercase_ , attention_mask=lowercase_ ).last_hidden_state lowercase__ : str = (1, 11, 5_12) self.assertEqual(output.shape , lowercase_ ) lowercase__ : str = tf.constant( [[-0.28_73, -1.92_18, -0.30_33], [-1.27_10, -0.13_38, -0.19_02], [0.40_95, 0.12_14, -1.31_21]] ) self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4E-3 ) ) lowercase__ : Union[str, Any] = tf.function(lowercase_ , jit_compile=lowercase_ ) lowercase__ : List[str] = xla_generate(lowercase_ , lowercase_ )[0] self.assertTrue(np.allclose(output[:, :3, :3] , lowercase_ , atol=4E-2 ) ) @require_tf @slow class snake_case_ ( unittest.TestCase ): def __UpperCamelCase ( self : Dict ) -> Optional[int]: super().setUp() lowercase__ : str = "facebook/opt-350m" def __UpperCamelCase ( self : Optional[Any] ) -> Optional[int]: lowercase__ : Dict = TFOPTForCausalLM.from_pretrained(self.path_model ) lowercase__ : Any = GPTaTokenizer.from_pretrained(self.path_model ) lowercase__ : List[str] = [ "Today is a beautiful day and I want to", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] # verify that prompt without BOS token is identical to Metaseq -> 
add_special_tokens=False lowercase__ : Tuple = tokenizer(lowercase_ , return_tensors="tf" , padding=lowercase_ , add_special_tokens=lowercase_ ) lowercase__ : List[str] = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) lowercase__ : Optional[Any] = tf.constant( [ [1.38_51, -13.89_23, -10.52_29, -10.75_33, -0.23_09, -10.23_84, -0.53_65, -9.09_47, -5.16_70], [-4.70_73, -10.62_76, -3.94_15, -21.52_42, -0.28_22, -0.28_22, -0.28_22, -0.28_22, -0.28_22], [0.62_47, -3.42_29, -8.91_79, -1.42_97, -14.16_50, 1.41_46, -9.02_18, -0.27_03, -0.27_03], [6.47_83, -1.99_13, -10.79_26, -2.33_36, 1.50_92, -0.99_74, -6.82_13, 1.34_77, 1.34_77], ] ) self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-4 ) ) lowercase__ : Dict = tf.function(lowercase_ , jit_compile=lowercase_ ) lowercase__ : Any = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 ) self.assertTrue(np.allclose(lowercase_ , lowercase_ , atol=1E-4 ) ) @require_tf @slow class snake_case_ ( unittest.TestCase ): @property def __UpperCamelCase ( self : int ) -> List[Any]: return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def __UpperCamelCase ( self : List[Any] ) -> Optional[Any]: lowercase__ : List[Any] = "facebook/opt-125m" lowercase__ : Optional[Any] = [ "Today is a beautiful day and I want to", "In the city of New York, the city", "Paris is the capital of France and the capital", "Computers and mobile phones have taken over the", ] lowercase__ : List[Any] = [] lowercase__ : Union[str, Any] = GPTaTokenizer.from_pretrained(lowercase_ ) lowercase__ : List[Any] = TFOPTForCausalLM.from_pretrained(lowercase_ ) for prompt in self.prompts: lowercase__ : Tuple = tokenizer(lowercase_ , return_tensors="tf" ).input_ids lowercase__ : Any = model.generate(lowercase_ , max_length=10 ) lowercase__ : Optional[int] = 
tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ ) predicted_outputs += generated_string self.assertListEqual(lowercase_ , lowercase_ ) def __UpperCamelCase ( self : List[str] ) -> List[Any]: lowercase__ : Dict = "facebook/opt-350m" lowercase__ : Optional[int] = GPTaTokenizer.from_pretrained(lowercase_ ) lowercase__ : Optional[Any] = TFOPTForCausalLM.from_pretrained(lowercase_ ) lowercase__ : Tuple = "left" # use different length sentences to test batching lowercase__ : str = [ "Hello, my dog is a little", "Today, I", ] lowercase__ : List[str] = tokenizer(lowercase_ , return_tensors="tf" , padding=lowercase_ ) lowercase__ : Dict = inputs["input_ids"] lowercase__ : int = model.generate(input_ids=lowercase_ , attention_mask=inputs["attention_mask"] ) lowercase__ : Tuple = tokenizer(sentences[0] , return_tensors="tf" ).input_ids lowercase__ : Dict = model.generate(input_ids=lowercase_ ) lowercase__ : Union[str, Any] = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs["attention_mask"][-1] , tf.intaa ) ) lowercase__ : Optional[Any] = tokenizer(sentences[1] , return_tensors="tf" ).input_ids lowercase__ : List[str] = model.generate(input_ids=lowercase_ , max_length=model.config.max_length - num_paddings ) lowercase__ : List[str] = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ ) lowercase__ : Tuple = tokenizer.decode(output_non_padded[0] , skip_special_tokens=lowercase_ ) lowercase__ : Tuple = tokenizer.decode(output_padded[0] , skip_special_tokens=lowercase_ ) lowercase__ : List[Any] = [ "Hello, my dog is a little bit of a dork.\nI'm a little bit", "Today, I was in the middle of a conversation with a friend about the", ] self.assertListEqual(lowercase_ , lowercase_ ) self.assertListEqual(lowercase_ , [non_padded_sentence, padded_sentence] ) def __UpperCamelCase ( self : Tuple ) -> Optional[Any]: lowercase__ : Optional[int] = "facebook/opt-350m" lowercase__ : Union[str, Any] = [ "Today is a beautiful day and I want 
to", "In the city of San Francisco, the city", "Paris is the capital of France and the capital", "Computers and mobile phones have taken over the", ] lowercase__ : Optional[int] = [] lowercase__ : Union[str, Any] = GPTaTokenizer.from_pretrained(lowercase_ ) lowercase__ : Optional[int] = TFOPTForCausalLM.from_pretrained(lowercase_ ) for prompt in self.prompts: lowercase__ : List[str] = tokenizer(lowercase_ , return_tensors="tf" ).input_ids lowercase__ : Optional[Any] = model.generate(lowercase_ , max_length=10 ) lowercase__ : int = tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ ) predicted_outputs += generated_string self.assertListEqual(lowercase_ , lowercase_ )
87
"""Convert original CvT checkpoints into the HuggingFace Transformers format.

Download the weights from the zoo:
https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
"""
import argparse
import json
from collections import OrderedDict

import torch
from huggingface_hub import cached_download, hf_hub_url

from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification


def embeddings(idx):
    """Return (hf_name, original_name) pairs for the patch-embedding weights of stage ``idx``."""
    embed = []
    for hf_suffix, orig_suffix in (
        ("projection.weight", "proj.weight"),
        ("projection.bias", "proj.bias"),
        ("normalization.weight", "norm.weight"),
        ("normalization.bias", "norm.bias"),
    ):
        embed.append(
            (
                f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.{hf_suffix}",
                f"stage{idx}.patch_embed.{orig_suffix}",
            )
        )
    return embed


def attention(idx, cnt):
    """Return (hf_name, original_name) pairs for attention block ``cnt`` of stage ``idx``.

    Order matches the original hand-written list: conv q/k/v projections (incl.
    batch-norm statistics), then the linear q/k/v projections, the attention
    output projection, the MLP, and finally the two layer norms.
    """
    attention_weights = []
    hf_prefix = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig_prefix = f"stage{idx}.blocks.{cnt}"

    # Depthwise-separable convolution projections for query/key/value.
    for proj, short in (("query", "q"), ("key", "k"), ("value", "v")):
        conv_hf = f"{hf_prefix}.attention.attention.convolution_projection_{proj}.convolution_projection"
        conv_orig = f"{orig_prefix}.attn.conv_proj_{short}"
        attention_weights.append((f"{conv_hf}.convolution.weight", f"{conv_orig}.conv.weight"))
        for stat in ("weight", "bias", "running_mean", "running_var", "num_batches_tracked"):
            attention_weights.append((f"{conv_hf}.normalization.{stat}", f"{conv_orig}.bn.{stat}"))

    # Linear query/key/value projections.
    for proj, short in (("query", "q"), ("key", "k"), ("value", "v")):
        for param in ("weight", "bias"):
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.projection_{proj}.{param}",
                    f"{orig_prefix}.attn.proj_{short}.{param}",
                )
            )

    # Attention output projection, MLP, and layer norms.
    for hf_mid, orig_mid in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for param in ("weight", "bias"):
            attention_weights.append((f"{hf_prefix}.{hf_mid}.{param}", f"{orig_prefix}.{orig_mid}.{param}"))
    return attention_weights


def cls_token(idx):
    """Return the cls-token mapping for stage ``idx``.

    NOTE: the original checkpoint only stores one cls token, under ``stage2``,
    which is why the source name is hard-coded regardless of ``idx``.
    """
    return [(f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token")]


def final():
    """Return (hf_name, original_name) pairs for the final layer norm and classification head."""
    return [
        ("layernorm.weight", "norm.weight"),
        ("layernorm.bias", "norm.bias"),
        ("classifier.weight", "head.weight"),
        ("classifier.bias", "head.bias"),
    ]


def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Rename the original CvT weights to the HF naming scheme and save model + image processor.

    Args:
        cvt_model: model name (e.g. ``"cvt-13"``, ``"cvt-21"``, ``"cvt-w24"``); the depth
            configuration is inferred from the two digits after the ``cvt-`` prefix.
        image_size: input image size to record on the image processor.
        cvt_file_name: path to the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: output directory for the converted model.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"
    with open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r") as fp:
        id2label = json.load(fp)
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    # assumes the processor exposes a mutable `size` dict — TODO confirm against the processor class
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        # Only stages flagged in config.cls_token carry a cls token.
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()

    for gg in list_of_state_dict:
        print(gg)
    for hf_name, orig_name in list_of_state_dict:
        huggingface_weights[hf_name] = original_weights[orig_name]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )
    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
87
1
from collections import UserDict
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class snake_case_(Pipeline):
    """Zero-shot image classification pipeline.

    Scores an image against a set of candidate text labels using a dual
    image/text encoder (CLIP-style) model, without task-specific training.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        """Classify the image(s) against ``candidate_labels`` (passed as a keyword argument)."""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # Route user kwargs to the preprocess step; forward/postprocess take no parameters.
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]
        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        """Build model inputs: processed image plus one hypothesis sentence per candidate label."""
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        """Turn per-label logits into a score-sorted list of {"score", "label"} dicts."""
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            # A single candidate label squeezes down to a scalar; renormalize to a list.
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
87
# Lazy-import module map for the ELECTRA model family: framework-specific
# submodules are only imported when one of their names is actually accessed.
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)

_import_structure = {
    "configuration_electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraOnnxConfig"],
    "tokenization_electra": ["ElectraTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_electra_fast"] = ["ElectraTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_electra"] = [
        "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ElectraForCausalLM",
        "ElectraForMaskedLM",
        "ElectraForMultipleChoice",
        "ElectraForPreTraining",
        "ElectraForQuestionAnswering",
        "ElectraForSequenceClassification",
        "ElectraForTokenClassification",
        "ElectraModel",
        "ElectraPreTrainedModel",
        "load_tf_weights_in_electra",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_electra"] = [
        "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFElectraForMaskedLM",
        "TFElectraForMultipleChoice",
        "TFElectraForPreTraining",
        "TFElectraForQuestionAnswering",
        "TFElectraForSequenceClassification",
        "TFElectraForTokenClassification",
        "TFElectraModel",
        "TFElectraPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_electra"] = [
        "FlaxElectraForCausalLM",
        "FlaxElectraForMaskedLM",
        "FlaxElectraForMultipleChoice",
        "FlaxElectraForPreTraining",
        "FlaxElectraForQuestionAnswering",
        "FlaxElectraForSequenceClassification",
        "FlaxElectraForTokenClassification",
        "FlaxElectraModel",
        "FlaxElectraPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraOnnxConfig
    from .tokenization_electra import ElectraTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_electra_fast import ElectraTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_electra import (
            ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            ElectraForCausalLM,
            ElectraForMaskedLM,
            ElectraForMultipleChoice,
            ElectraForPreTraining,
            ElectraForQuestionAnswering,
            ElectraForSequenceClassification,
            ElectraForTokenClassification,
            ElectraModel,
            ElectraPreTrainedModel,
            load_tf_weights_in_electra,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_electra import (
            TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFElectraForMaskedLM,
            TFElectraForMultipleChoice,
            TFElectraForPreTraining,
            TFElectraForQuestionAnswering,
            TFElectraForSequenceClassification,
            TFElectraForTokenClassification,
            TFElectraModel,
            TFElectraPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_electra import (
            FlaxElectraForCausalLM,
            FlaxElectraForMaskedLM,
            FlaxElectraForMultipleChoice,
            FlaxElectraForPreTraining,
            FlaxElectraForQuestionAnswering,
            FlaxElectraForSequenceClassification,
            FlaxElectraForTokenClassification,
            FlaxElectraModel,
            FlaxElectraPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
1
# Lazy-import module map for the speech-encoder-decoder composite model:
# torch/flax modeling code is only imported when actually accessed.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available

_import_structure = {"configuration_speech_encoder_decoder": ["SpeechEncoderDecoderConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_encoder_decoder"] = ["SpeechEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_speech_encoder_decoder"] = ["FlaxSpeechEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel

else:
    import sys

    # Replace this module with a lazy proxy that imports submodules on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
import json
import os
import unittest

from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class snake_case_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the LED slow and fast tokenizers."""

    # Attributes consumed by TokenizerTesterMixin.
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        # Minimal BPE vocab/merges so the toy tokenizers can round-trip "lower newer".
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        # With no target text, no labels/decoder inputs should be produced.
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        # An over-long input must be truncated to the model max length (16384 via padding rules -> 5122 here).
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            # Both sources and targets are wrapped in <s> ... </s>.
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        # tokenizer.pad must extend a user-provided global_attention_mask with -1 padding.
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]
            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        # Not applicable to this tokenizer; deliberately skipped.
        pass

    def test_embeded_special_tokens(self):
        # Slow and fast tokenizers must agree on inputs that embed a special token mid-sentence.
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
87
1
import unittest
from queue import Empty
from threading import Thread

from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device

from ..test_modeling_common import ids_tensor

if is_torch_available():
    import torch

    from transformers import AutoModelForCausalLM


@require_torch
class snake_case_(unittest.TestCase):
    """Tests for TextStreamer and TextIteratorStreamer generation streaming."""

    def test_text_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        # Disable EOS so generation always runs the full max_new_tokens.
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, greedy_text)

    def test_iterator_streamer_matches_non_streaming(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        greedy_text = tokenizer.decode(greedy_ids[0])

        # Generation runs on a worker thread while the main thread consumes the stream.
        streamer = TextIteratorStreamer(tokenizer)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()
        streamer_text = ""
        for new_text in streamer:
            streamer_text += new_text

        self.assertEqual(streamer_text, greedy_text)

    def test_text_streamer_skip_prompt(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        greedy_ids = model.generate(input_ids, max_new_tokens=10, do_sample=False)
        # Only the newly generated tokens should be streamed when skip_prompt=True.
        new_greedy_ids = greedy_ids[:, input_ids.shape[1] :]
        new_greedy_text = tokenizer.decode(new_greedy_ids[0])

        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_prompt=True)
            model.generate(input_ids, max_new_tokens=10, do_sample=False, streamer=streamer)
        # The greedy text should be printed to stdout, except for the final "\n" in the streamer
        streamer_text = cs.out[:-1]

        self.assertEqual(streamer_text, new_greedy_text)

    def test_text_streamer_decode_kwargs(self):
        # Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
        # with actual models -- the dummy models' tokenizers are not aligned with their models, and
        # `skip_special_tokens=True` has no effect on them
        tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
        model = AutoModelForCausalLM.from_pretrained("distilgpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = torch.ones((1, 5), device=torch_device).long() * model.config.bos_token_id
        with CaptureStdout() as cs:
            streamer = TextStreamer(tokenizer, skip_special_tokens=True)
            model.generate(input_ids, max_new_tokens=1, do_sample=False, streamer=streamer)

        # The prompt contains a special token, so the streamer should not print it. As such, the output text, when
        # re-tokenized, must only contain one token
        streamer_text = cs.out[:-1]  # Remove the final "\n"
        streamer_text_tokenized = tokenizer(streamer_text, return_tensors="pt")
        self.assertEqual(streamer_text_tokenized.input_ids.shape, (1, 1))

    def test_iterator_streamer_timeout(self):
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
        model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(torch_device)
        model.config.eos_token_id = -1

        input_ids = ids_tensor((1, 5), vocab_size=model.config.vocab_size).to(torch_device)
        streamer = TextIteratorStreamer(tokenizer, timeout=0.001)
        generation_kwargs = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
        thread = Thread(target=model.generate, kwargs=generation_kwargs)
        thread.start()

        # The streamer will timeout after 0.001 seconds, so an exception will be raised
        with self.assertRaises(Empty):
            streamer_text = ""
            for new_text in streamer:
                streamer_text += new_text
87
import math
from typing import Any, Callable, List, Optional, Tuple, Union

import numpy as np
import torch

from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor


if is_onnx_available():
    from ..onnx_utils import OnnxRuntimeModel

from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

# Number of mel-spectrogram frames processed per chunk (also the context width).
TARGET_FEATURE_LENGTH = 256


class snake_case_(DiffusionPipeline):
    """Spectrogram-diffusion pipeline: generates audio from note tokens.

    Denoises a mel spectrogram chunk-by-chunk conditioned on encoded note
    tokens and the previously generated chunk, then (optionally) vocodes the
    result with MelGAN.

    NOTE(review): method names below were reconstructed from their call sites
    in ``__call__`` (the mangled source defined every method under one
    duplicate name, which made the earlier defs unreachable).
    """

    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: TaFilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )

    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly rescale features from [min_value, max_value] to `output_range`."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out

    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert `scale_features`: map `input_range` back to [min_value, max_value]."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value

    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        """Encode note tokens and the continuous (previous-chunk) inputs.

        Returns a list of (encoding, mask) pairs consumed by the decoder.
        """
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )

        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )

        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]

    def decode(self, encodings_and_masks, input_tokens, noise_time):
        """Run one denoising step of the decoder at diffusion time `noise_time`."""
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(
            input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device
        )

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks,
            decoder_input_tokens=input_tokens,
            decoder_noise_time=timesteps,
        )
        return logits

    @torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        """Generate audio for a list of token chunks.

        Each chunk is denoised conditioned on the previous chunk's predicted
        mel features; the per-chunk mels are concatenated and optionally
        vocoded by MelGAN when ``output_type == "numpy"``.
        """
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
                f""" {type(callback_steps)}."""
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros(
                    (1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device
                )
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
87
1
from __future__ import annotations

import time
from math import sqrt

# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]


class Node:
    """A search node: a grid position plus g/h/f costs and a parent link."""

    def __init__(
        self,
        pos_x: int,
        pos_y: int,
        goal_x: int,
        goal_y: int,
        g_cost: int,
        parent: Node | None,
    ) -> None:
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        """Distance estimate to the goal: Manhattan if HEURISTIC == 1, else Euclidean."""
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other: Node) -> bool:
        # Ordering by f-cost lets a sorted list act as a priority queue.
        return self.f_cost < other.f_cost


class AStar:
    """Classic A* over the module-level `grid` with 4-connected moves."""

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        # Coordinates arrive as (y, x); Node takes (x, y) first.
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)

        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []

        self.reached = False

    def search(self) -> list[TPosition]:
        """Expand nodes by ascending f-cost until the target is reached.

        Returns the path start→target, or [start] when no path exists.
        """
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)

            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)

            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)

            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue

                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))

                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)

        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        """Return in-bounds, non-obstacle neighbours of `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        """Walk parent links back to the start and return the path in order."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path


class BidirectionalAStar:
    """Two A* searches (start→goal and goal→start) meeting in the middle."""

    def __init__(self, start: TPosition, goal: TPosition) -> None:
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(current_fwd_node, current_bwd_node)

            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)

            # Each search aims at the other's current frontier node.
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node

            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }

            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue

                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(astar.open_nodes.index(child_node))

                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)

        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        """Join the forward path with the reversed backward path at the meeting node."""
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()  # drop the duplicated meeting node
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path


if __name__ == "__main__":
    # all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(f"AStar execution time = {end_time:f} seconds")

    bd_start_time = time.time()
    bidir_a_star = BidirectionalAStar(init, goal)
    # BUGFIX: the original timed block never ran the search and referenced an
    # undefined bd_start_time.
    bd_path = bidir_a_star.search()
    bd_end_time = time.time() - bd_start_time
    print(f"BidirectionalAStar execution time = {bd_end_time:f} seconds")
87
import unittest

from datasets import load_dataset

from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow


@is_pipeline_test
@require_torch
class snake_case_(unittest.TestCase):
    """Pipeline tests for zero-shot audio classification.

    NOTE(review): the mangled source gave all four methods the same
    non-`test_*` name, so later defs shadowed earlier ones and unittest
    discovery ran none of them; distinct `test_*` names restore discovery.
    """

    @require_torch
    def test_small_model_pt(self):
        # Tiny CLAP model: checks output schema and (stable) tiny-model scores.
        audio_classifier = pipeline(
            task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused"
        )
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}],
        )

    @unittest.skip("No models are available in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        # Full CLAP model: checks classification quality, batching, and batch_size arg.
        audio_classifier = pipeline(
            task="zero-shot-audio-classification",
            model="laion/clap-htsat-unfused",
        )
        # This is an audio of a dog
        dataset = load_dataset("ashraq/esc50")
        audio = dataset["train"]["audio"][-1]["array"]
        output = audio_classifier(audio, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])

        self.assertEqual(
            nested_simplify(output),
            [
                {"score": 0.999, "label": "Sound of a dog"},
                {"score": 0.001, "label": "Sound of vaccum cleaner"},
            ],
        )

        output = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"])
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

        output = audio_classifier(
            [audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5
        )
        self.assertEqual(
            nested_simplify(output),
            [
                [
                    {"score": 0.999, "label": "Sound of a dog"},
                    {"score": 0.001, "label": "Sound of vaccum cleaner"},
                ],
            ]
            * 5,
        )

    @unittest.skip("No models are available in TF")
    def test_large_model_tf(self):
        pass
87
1
import argparse
import json
from collections import OrderedDict

import torch
from huggingface_hub import cached_download, hf_hub_url

from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification


def embeddings(idx):
    """Map the patch-embedding weights of stage `idx` (HF key, original key)."""
    embed = []
    for hf_param, orig_param in (
        ("projection.weight", "proj.weight"),
        ("projection.bias", "proj.bias"),
        ("normalization.weight", "norm.weight"),
        ("normalization.bias", "norm.bias"),
    ):
        embed.append(
            (
                f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.{hf_param}",
                f"stage{idx}.patch_embed.{orig_param}",
            )
        )
    return embed


def attention(idx, cnt):
    """Map attention + MLP + norm weights of block `cnt` in stage `idx`.

    Generated with loops instead of ~40 copy-pasted appends; the key pairs are
    identical to the original enumeration.
    """
    attention_weights = []
    hf_prefix = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig_prefix = f"stage{idx}.blocks.{cnt}"

    # Depthwise-conv projections for query/key/value (conv + batch-norm stats).
    for word, letter in (("query", "q"), ("key", "k"), ("value", "v")):
        conv_hf = f"{hf_prefix}.attention.attention.convolution_projection_{word}.convolution_projection"
        conv_orig = f"{orig_prefix}.attn.conv_proj_{letter}"
        attention_weights.append((f"{conv_hf}.convolution.weight", f"{conv_orig}.conv.weight"))
        for stat in ("weight", "bias", "running_mean", "running_var", "num_batches_tracked"):
            attention_weights.append((f"{conv_hf}.normalization.{stat}", f"{conv_orig}.bn.{stat}"))

    # Linear q/k/v projections.
    for word, letter in (("query", "q"), ("key", "k"), ("value", "v")):
        for param in ("weight", "bias"):
            attention_weights.append(
                (
                    f"{hf_prefix}.attention.attention.projection_{word}.{param}",
                    f"{orig_prefix}.attn.proj_{letter}.{param}",
                )
            )

    # Attention output projection, MLP, and layer norms.
    for hf_mod, orig_mod in (
        ("attention.output.dense", "attn.proj"),
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for param in ("weight", "bias"):
            attention_weights.append((f"{hf_prefix}.{hf_mod}.{param}", f"{orig_prefix}.{orig_mod}.{param}"))

    return attention_weights


def cls_token(idx):
    """Map the CLS token of stage `idx` (only the last stage has one)."""
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token


def final():
    """Map the final layer norm and classification head."""
    head = []
    head.append(("layernorm.weight", "norm.weight"))
    head.append(("layernorm.bias", "norm.bias"))
    head.append(("classifier.weight", "head.weight"))
    head.append(("classifier.bias", "head.bias"))
    return head


def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert an original CvT checkpoint to a HF `CvtForImageClassification`.

    NOTE(review): attribute-assignment targets (config.depth, the processor
    size, the state-dict writes) were stripped in the mangled source and are
    reconstructed here from the upstream conversion script — verify against a
    real checkpoint before relying on the output.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1000
    repo_id = "huggingface/label-files"

    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2 + 2 20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size

    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)

    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)


# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--cvt_model",
        default="cvt-w24",
        type=str,
        help="Name of the cvt model you'd like to convert.",
    )
    parser.add_argument(
        "--image_size",
        default=384,
        type=int,
        help="Input Image Size",
    )
    parser.add_argument(
        "--cvt_file_name",
        default=r"cvtmodels\CvT-w24-384x384-IN-22k.pth",
        type=str,
        help="Input Image Size",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
87
import operator def lowercase_ ( _lowerCamelCase : list , _lowerCamelCase : bool = False , _lowerCamelCase : list | None = None): lowercase__ : int = operator.lt if reverse else operator.gt lowercase__ : str = solution or [] if not arr: return solution lowercase__ : List[str] = [arr.pop(0)] for i, item in enumerate(_lowerCamelCase): if _operator(_lowerCamelCase , sublist[-1]): sublist.append(_lowerCamelCase) arr.pop(_lowerCamelCase) # merging sublist into solution list if not solution: solution.extend(_lowerCamelCase) else: while sublist: lowercase__ : str = sublist.pop(0) for i, xx in enumerate(_lowerCamelCase): if not _operator(_lowerCamelCase , _lowerCamelCase): solution.insert(_lowerCamelCase , _lowerCamelCase) break else: solution.append(_lowerCamelCase) strand_sort(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase) return solution if __name__ == "__main__": assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5] assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
87
1
UpperCamelCase = [0, 2, 4, 6, 8] UpperCamelCase = [1, 3, 5, 7, 9] def lowercase_ ( _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : list[int] , _lowerCamelCase : int): if remaining_length == 0: if digits[0] == 0 or digits[-1] == 0: return 0 for i in range(length // 2 - 1 , -1 , -1): remainder += digits[i] + digits[length - i - 1] if remainder % 2 == 0: return 0 remainder //= 10 return 1 if remaining_length == 1: if remainder % 2 == 0: return 0 lowercase__ : str = 0 for digit in range(10): lowercase__ : str = digit result += reversible_numbers( 0 , (remainder + 2 * digit) // 10 , _lowerCamelCase , _lowerCamelCase) return result lowercase__ : Dict = 0 for digita in range(10): lowercase__ : int = digita if (remainder + digita) % 2 == 0: lowercase__ : Optional[Any] = ODD_DIGITS else: lowercase__ : str = EVEN_DIGITS for digita in other_parity_digits: lowercase__ : List[str] = digita result += reversible_numbers( remaining_length - 2 , (remainder + digita + digita) // 10 , _lowerCamelCase , _lowerCamelCase , ) return result def lowercase_ ( _lowerCamelCase : int = 9): lowercase__ : Tuple = 0 for length in range(1 , max_power + 1): result += reversible_numbers(_lowerCamelCase , 0 , [0] * length , _lowerCamelCase) return result if __name__ == "__main__": print(f"{solution() = }")
87
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional

import torch

from ..utils import add_start_docstrings, logging

# NOTE(review): identifiers in this module look machine-mangled - both the
# logger and the shared docstring are bound to the same name (the second
# assignment clobbers the first), parameters are duplicated as ``lowercase_``
# (invalid Python), and method bodies read names (``max_length``, ``logger``,
# ``input_ids`` ...) that are never bound here.  The comments below describe
# the apparent intent; names must be restored against the upstream module
# before this can run.
UpperCamelCase = logging.get_logger(__name__)

# Shared docstring injected into each criterion's __call__ via
# ``add_start_docstrings``.
UpperCamelCase = R'''
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs (`Dict[str, Any]`, *optional*):
            Additional stopping criteria specific kwargs.

    Return:
        `bool`. `False` indicates we should continue, `True` indicates we should stop.
'''


class snake_case_(__A):
    # Abstract base criterion: subclasses return True when generation should stop.
    @add_start_docstrings(lowercase_)
    def __call__(self: Optional[Any], lowercase_: torch.LongTensor, lowercase_: torch.FloatTensor, **lowercase_: List[str]) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class snake_case_(__A):
    # Length-based criterion: fires once the generated sequence reaches
    # ``max_length``; optionally warns when the model's position-embedding
    # budget is about to be exceeded.
    def __init__(self: Dict, lowercase_: int, lowercase_: Optional[int] = None) -> List[str]:
        lowercase__: str = max_length
        lowercase__: Optional[int] = max_position_embeddings

    @add_start_docstrings(lowercase_)
    def __call__(self: Tuple, lowercase_: torch.LongTensor, lowercase_: torch.FloatTensor, **lowercase_: Union[str, Any]) -> bool:
        # Current sequence length is the trailing dimension of input_ids.
        lowercase__: str = input_ids.shape[-1]
        lowercase__: Any = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                F'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class snake_case_(__A):
    # Deprecated criterion: stop after ``max_new_tokens`` tokens generated
    # past ``start_length``.  Kept for backward compatibility only.
    def __init__(self: Tuple, lowercase_: int, lowercase_: int) -> List[str]:
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            F'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
            "with `max_length = start_length + max_new_tokens` instead.",
            lowercase_,
        )
        lowercase__: Optional[int] = start_length
        lowercase__: str = max_new_tokens
        lowercase__: Tuple = start_length + max_new_tokens

    @add_start_docstrings(lowercase_)
    def __call__(self: List[Any], lowercase_: torch.LongTensor, lowercase_: torch.FloatTensor, **lowercase_: Dict) -> bool:
        return input_ids.shape[-1] >= self.max_length


class snake_case_(__A):
    # Wall-clock criterion: fires once ``max_time`` seconds have elapsed since
    # ``initial_timestamp`` (defaults to construction time).
    def __init__(self: Tuple, lowercase_: float, lowercase_: Optional[float] = None) -> Dict:
        lowercase__: List[str] = max_time
        lowercase__: Tuple = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(lowercase_)
    def __call__(self: int, lowercase_: torch.LongTensor, lowercase_: torch.FloatTensor, **lowercase_: Union[str, Any]) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class snake_case_(__A):
    # Container of criteria: generation stops as soon as any member fires.
    @add_start_docstrings(lowercase_)
    def __call__(self: str, lowercase_: torch.LongTensor, lowercase_: torch.FloatTensor, **lowercase_: List[str]) -> bool:
        return any(criteria(lowercase_, lowercase_) for criteria in self)

    @property
    def __UpperCamelCase(self: Optional[Any]) -> Optional[int]:
        # Return the max_length of the first length-based member, if any.
        # NOTE(review): the isinstance targets were mangled away; presumably
        # the two branches checked MaxLengthCriteria / MaxNewTokensCriteria.
        for stopping_criterium in self:
            if isinstance(lowercase_, lowercase_):
                return stopping_criterium.max_length
            elif isinstance(lowercase_, lowercase_):
                return stopping_criterium.max_length
        return None


def lowercase_(_lowerCamelCase: StoppingCriteriaList, _lowerCamelCase: int):
    # Reconcile a criteria list with an explicit ``max_length``: warn when the
    # two disagree, or append a MaxLengthCriteria when the list has none.
    lowercase__: Optional[int] = stopping_criteria.max_length
    lowercase__: str = deepcopy(_lowerCamelCase)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", _lowerCamelCase)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=_lowerCamelCase))
    return new_stopping_criteria
87
1
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential

# Train a small stacked-LSTM forecaster on a univariate series read from
# ``sample_data.csv`` and predict on the held-out tail.
#
# NOTE(review): every intermediate result is bound to the same machine-mangled
# name ``UpperCamelCase`` while later lines still read the original names
# (``df``, ``len_data``, ``actual_data``, ``train_data``, ``x_train`` ...);
# the original bindings must be restored before this script can run.
if __name__ == "__main__":
    UpperCamelCase = pd.read_csv('''sample_data.csv''', header=None)
    UpperCamelCase = df.shape[:1][0]
    # If you're using some other dataset input the target column
    UpperCamelCase = df.iloc[:, 1:2]
    UpperCamelCase = actual_data.values.reshape(len_data, 1)
    # Scale the series into [0, 1] before feeding the LSTM.
    UpperCamelCase = MinMaxScaler().fit_transform(actual_data)
    UpperCamelCase = 10  # presumably look_back (input window length) - verify
    UpperCamelCase = 5  # presumably forward_days (prediction horizon) - verify
    UpperCamelCase = 20  # presumably periods (held-out test windows) - verify
    # Split point between training data and the held-out tail.
    UpperCamelCase = len_data - periods * look_back
    UpperCamelCase = actual_data[:division]
    UpperCamelCase = actual_data[division - look_back :]
    UpperCamelCase, UpperCamelCase = [], []
    UpperCamelCase, UpperCamelCase = [], []
    # Build sliding (window -> horizon) supervised pairs.
    for i in range(0, len(train_data) - forward_days - look_back + 1):
        train_x.append(train_data[i : i + look_back])
        train_y.append(train_data[i + look_back : i + look_back + forward_days])
    for i in range(0, len(test_data) - forward_days - look_back + 1):
        test_x.append(test_data[i : i + look_back])
        test_y.append(test_data[i + look_back : i + look_back + forward_days])
    UpperCamelCase = np.array(train_x)
    UpperCamelCase = np.array(test_x)
    UpperCamelCase = np.array([list(i.ravel()) for i in train_y])
    UpperCamelCase = np.array([list(i.ravel()) for i in test_y])
    # Two stacked LSTM layers followed by a dense multi-step output head.
    UpperCamelCase = Sequential()
    model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
    model.add(LSTM(64, input_shape=(128, 1)))
    model.add(Dense(forward_days))
    model.compile(loss='''mean_squared_error''', optimizer='''adam''')
    UpperCamelCase = model.fit(
        x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
    )
    UpperCamelCase = model.predict(x_test)
87
from typing import Dict

import numpy as np
import torch

from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map


# NOTE(review): identifiers here appear machine-mangled - every assignment
# target was rewritten to ``lowercase__`` while later lines still read the
# original names (``restype_atomaa_to_atomaa_list``, ``protein`` ...), and
# numeric suffixes were garbled (``atomaa`` for atom14/atom37, ``intaa`` /
# ``floataa`` presumably for int32/float32 - verify upstream).  Comments
# describe apparent intent only; the names must be restored before running.
def lowercase_(_lowerCamelCase: Dict[str, torch.Tensor]):
    # Build per-residue-type lookup tables between the compact 14-atom
    # representation and the full 37-atom representation, then index them by
    # the protein's aatype to attach per-residue index maps and masks.
    lowercase__: Any = []
    lowercase__: Optional[int] = []
    lowercase__: Tuple = []
    for rt in rc.restypes:
        lowercase__: Dict = rc.restype_name_to_atomaa_names[rc.restype_atoa[rt]]
        restype_atomaa_to_atomaa_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        lowercase__: str = {name: i for i, name in enumerate(_lowerCamelCase)}
        restype_atomaa_to_atomaa_list.append(
            [(atom_name_to_idxaa[name] if name in atom_name_to_idxaa else 0) for name in rc.atom_types]
        )
        restype_atomaa_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atomaa_to_atomaa_list.append([0] * 14)
    restype_atomaa_to_atomaa_list.append([0] * 37)
    restype_atomaa_mask_list.append([0.0] * 14)
    lowercase__: Union[str, Any] = torch.tensor(
        _lowerCamelCase,
        dtype=torch.intaa,
        device=protein["aatype"].device,
    )
    lowercase__: str = torch.tensor(
        _lowerCamelCase,
        dtype=torch.intaa,
        device=protein["aatype"].device,
    )
    lowercase__: List[str] = torch.tensor(
        _lowerCamelCase,
        dtype=torch.floataa,
        device=protein["aatype"].device,
    )
    lowercase__: str = protein["aatype"].to(torch.long)

    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    lowercase__: Dict = restype_atomaa_to_atomaa[protein_aatype]
    lowercase__: str = restype_atomaa_mask[protein_aatype]
    lowercase__: List[Any] = residx_atomaa_mask
    lowercase__: Optional[Any] = residx_atomaa_to_atomaa.long()

    # create the gather indices for mapping back
    lowercase__: str = restype_atomaa_to_atomaa[protein_aatype]
    lowercase__: str = residx_atomaa_to_atomaa.long()

    # create the corresponding mask
    lowercase__: Optional[Any] = torch.zeros([21, 37], dtype=torch.floataa, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        lowercase__: Tuple = rc.restype_atoa[restype_letter]
        lowercase__: List[Any] = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            lowercase__: Optional[int] = rc.atom_order[atom_name]
            lowercase__: Tuple = 1
    lowercase__: Dict = restype_atomaa_mask[protein_aatype]
    lowercase__: Any = residx_atomaa_mask
    return protein


def lowercase_(_lowerCamelCase: Dict[str, torch.Tensor]):
    # NumPy-facing wrapper: lift arrays to tensors, run the mask builder
    # above (called here by its apparent upstream name ``make_atomaa_masks``,
    # which is undefined in this mangled copy), then convert results back.
    lowercase__: Tuple = tree_map(lambda _lowerCamelCase: torch.tensor(_lowerCamelCase, device=batch["aatype"].device), _lowerCamelCase, np.ndarray)
    lowercase__: List[str] = tensor_tree_map(lambda _lowerCamelCase: np.array(_lowerCamelCase), make_atomaa_masks(_lowerCamelCase))
    return out
87
1
# Project Euler 551: with a(1) = 1 and a(n) = a(n-1) + digitsum(a(n-1)),
# ``solution(n)`` returns a(n).  The current term's digits are kept
# little-endian in a list; terms are advanced in large memoized "jumps"
# keyed by the digit sum of the high part of the number.

# Range of digit-block sizes k used for jumping, and powers of ten used to
# assemble the low k digits into an integer.
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
# memo[digitsum(b)][c] -> list of cached jumps (diff, dn, k), sorted by dn.
memo: dict = {}


def next_term(a_i: list, k: int, i: int, n: int) -> tuple:
    """Advance ``a_i`` (currently term ``i``) toward term ``n`` using cached jumps.

    The term is viewed as a(i) = b * 10**k + c.  Returns ``(diff, dn)`` where
    ``diff`` is the total amount added to the low ``k`` digits and ``dn`` is
    the number of terms advanced.  Stops early once the low part would carry
    into the high part (c + diff >= 10**k), because a jump is only valid while
    digitsum(b) stays constant.
    """
    # ds_b: digit sum of the high part b; c: the low k digits as an integer.
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                # write the updated low digits back, little-endian
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))
    return (diff, dn)


def compute(a_i: list, k: int, i: int, n: int) -> tuple:
    """Advance term-by-term (no memoization) until term ``n`` is reached or
    the low ``k`` digits overflow.  Returns ``(diff, terms_advanced)``."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        # each step adds the full digit sum of the current term
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]
        if addend > 0:
            break

    if addend > 0:
        # the carry escaped the low k digits; push it into the high part
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits: list, k: int, addend: int) -> None:
    """Add ``addend`` into the little-endian digit list starting at index ``k``."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, s = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            addend = addend // 10
        digits[j] = s
        if addend == 0:
            break

    # append whatever carry is left as new high digits
    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """Return the ``n``-th term of the sequence (1-indexed, a(1) = 1)."""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    # reassemble the little-endian digit list into an integer
    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


# Backward-compatible alias for the previous machine-mangled public name.
lowercase_ = solution


if __name__ == "__main__":
    print(f"{solution() = }")
87
import unittest

from transformers import BigBirdConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask

if is_flax_available():
    import jax

    from transformers.models.big_bird.modeling_flax_big_bird import (
        FlaxBigBirdForCausalLM,
        FlaxBigBirdForMaskedLM,
        FlaxBigBirdForMultipleChoice,
        FlaxBigBirdForPreTraining,
        FlaxBigBirdForQuestionAnswering,
        FlaxBigBirdForSequenceClassification,
        FlaxBigBirdForTokenClassification,
        FlaxBigBirdModel,
    )


# NOTE(review): identifiers in this test module appear machine-mangled: both
# classes are named ``snake_case_``, every parameter is ``lowercase_``
# (invalid duplicate arguments), locals are bound to ``lowercase__`` while
# later lines still read the original names, class attributes and the mixin
# base were rewritten to ``__A``, and a tuple target carries an annotation
# (invalid syntax).  Comments describe apparent intent; the names must be
# restored against the upstream test module before this can run.
class snake_case_(unittest.TestCase):
    # Model tester: builds a small BigBirdConfig plus random input tensors.
    def __init__(
        self: Tuple,
        lowercase_: List[Any],
        lowercase_: Union[str, Any] = 2,
        lowercase_: Union[str, Any] = 56,
        lowercase_: Tuple = True,
        lowercase_: Optional[Any] = True,
        lowercase_: Optional[Any] = True,
        lowercase_: int = True,
        lowercase_: Any = 99,
        lowercase_: int = 32,
        lowercase_: str = 2,
        lowercase_: Union[str, Any] = 2,
        lowercase_: Dict = 7,
        lowercase_: Dict = "gelu_new",
        lowercase_: Tuple = 0.1,
        lowercase_: List[Any] = 0.1,
        lowercase_: Tuple = 5_12,
        lowercase_: Optional[Any] = 16,
        lowercase_: List[Any] = 2,
        lowercase_: Dict = 0.02,
        lowercase_: int = 4,
        lowercase_: Tuple = "block_sparse",
        lowercase_: Dict = True,
        lowercase_: Optional[int] = False,
        lowercase_: Dict = 2,
        lowercase_: int = 3,
    ) -> Union[str, Any]:
        # Store the test hyper-parameters on the tester instance.
        lowercase__: Dict = parent
        lowercase__: Dict = batch_size
        lowercase__: Tuple = seq_length
        lowercase__: Dict = is_training
        lowercase__: Dict = use_attention_mask
        lowercase__: Tuple = use_token_type_ids
        lowercase__: Optional[int] = use_labels
        lowercase__: List[Any] = vocab_size
        lowercase__: Any = hidden_size
        lowercase__: List[Any] = num_hidden_layers
        lowercase__: Union[str, Any] = num_attention_heads
        lowercase__: str = intermediate_size
        lowercase__: int = hidden_act
        lowercase__: str = hidden_dropout_prob
        lowercase__: List[str] = attention_probs_dropout_prob
        lowercase__: Optional[Any] = max_position_embeddings
        lowercase__: Union[str, Any] = type_vocab_size
        lowercase__: Dict = type_sequence_label_size
        lowercase__: Any = initializer_range
        lowercase__: List[str] = num_choices
        lowercase__: str = rescale_embeddings
        lowercase__: Optional[Any] = attention_type
        lowercase__: Optional[int] = use_bias
        lowercase__: Optional[int] = block_size
        lowercase__: str = num_random_blocks

    def __UpperCamelCase(self: str) -> Optional[Any]:
        # prepare_config_and_inputs: random ids, optional masks, and a config.
        lowercase__: List[Any] = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        lowercase__: str = None
        if self.use_attention_mask:
            lowercase__: Any = random_attention_mask([self.batch_size, self.seq_length])
        lowercase__: Optional[int] = None
        if self.use_token_type_ids:
            lowercase__: Any = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        lowercase__: int = BigBirdConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=lowercase_,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
            block_size=self.block_size,
            num_random_blocks=self.num_random_blocks,
            use_bias=self.use_bias,
            rescale_embeddings=self.rescale_embeddings,
        )
        return config, input_ids, token_type_ids, attention_mask

    def __UpperCamelCase(self: Union[str, Any]) -> int:
        # prepare_config_and_inputs_for_common: repack as an inputs dict.
        lowercase__: int = self.prepare_config_and_inputs()
        lowercase__, lowercase__, lowercase__, lowercase__: Dict = config_and_inputs
        lowercase__: Union[str, Any] = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "attention_mask": attention_mask,
        }
        return config, inputs_dict


@require_flax
class snake_case_(__A, unittest.TestCase):
    # Flax BigBird model test suite (mixin base mangled to ``__A``).
    __A: Optional[int] = (
        (
            FlaxBigBirdForCausalLM,
            FlaxBigBirdModel,
            FlaxBigBirdForPreTraining,
            FlaxBigBirdForMaskedLM,
            FlaxBigBirdForMultipleChoice,
            FlaxBigBirdForQuestionAnswering,
            FlaxBigBirdForSequenceClassification,
            FlaxBigBirdForTokenClassification,
        )
        if is_flax_available()
        else ()
    )
    __A: List[str] = False
    __A: Any = False

    def __UpperCamelCase(self: List[str]) -> List[Any]:
        # setUp: instantiate the model tester defined above.
        lowercase__: Union[str, Any] = FlaxBigBirdModelTester(self)

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __UpperCamelCase(self: Optional[int]) -> Dict:
        super().test_from_pretrained_save_pretrained()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __UpperCamelCase(self: List[str]) -> Any:
        super().test_from_pretrained_with_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __UpperCamelCase(self: Tuple) -> str:
        super().test_no_automatic_init()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __UpperCamelCase(self: Dict) -> Union[str, Any]:
        super().test_hidden_states_output()

    @slow
    def __UpperCamelCase(self: Optional[int]) -> Tuple:
        # every model class should load from the public checkpoint
        for model_class_name in self.all_model_classes:
            lowercase__: Optional[Any] = model_class_name.from_pretrained("google/bigbird-roberta-base")
            self.assertIsNotNone(lowercase_)

    def __UpperCamelCase(self: int) -> Optional[int]:
        # attention outputs are only checked when the suite enables them
        if self.test_attn_probs:
            super().test_attention_outputs()

    @slow
    # copied from `test_modeling_flax_common` because it takes much longer than other models
    def __UpperCamelCase(self: str) -> Any:
        # jitted and non-jitted forward passes must agree in output shapes
        lowercase__, lowercase__: Dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                lowercase__: Union[str, Any] = self._prepare_for_class(lowercase_, lowercase_)
                lowercase__: Optional[Any] = model_class(lowercase_)

                @jax.jit
                def model_jitted(lowercase_: Tuple, lowercase_: int = None, **lowercase_: Dict):
                    return model(input_ids=lowercase_, attention_mask=lowercase_, **lowercase_)

                with self.subTest("JIT Enabled"):
                    lowercase__: int = model_jitted(**lowercase_).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        lowercase__: Any = model_jitted(**lowercase_).to_tuple()

                self.assertEqual(len(lowercase_), len(lowercase_))
                for jitted_output, output in zip(lowercase_, lowercase_):
                    self.assertEqual(jitted_output.shape, output.shape)

    def __UpperCamelCase(self: List[Any], lowercase_: str, lowercase_: Union[str, Any], lowercase_: Optional[int], lowercase_: List[Any] = 1E-5, lowercase_: Any = "outputs", lowercase_: List[str] = None) -> List[Any]:
        # `bigbird_block_sparse_attention` in `FlaxBigBird` returns `attention_probs = None`, while in PyTorch version,
        # an effort was done to return `attention_probs` (yet to be verified).
        if name.startswith("outputs.attentions"):
            return
        else:
            super().check_pt_flax_outputs(lowercase_, lowercase_, lowercase_, lowercase_, lowercase_, lowercase_)
87
1
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import Features, Sequence, Value
from .base import TaskTemplate


# Task template for extractive question answering.
#
# Fixes applied: ``@dataclass(frozen=__A)`` and the base class ``__A`` were
# undefined names (the file imports ``TaskTemplate`` for exactly this use,
# and task templates are frozen); the six class attributes were all mangled
# to ``__A`` so each binding clobbered the previous one, while the property
# below reads ``self.question_column`` / ``self.context_column`` /
# ``self.answers_column`` - those field names are restored accordingly.
# Schema attribute names follow the datasets TaskTemplate convention
# (``input_schema`` / ``label_schema``) - verify against upstream.
@dataclass(frozen=True)
class snake_case_(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        """Map the template's configured column names onto the canonical ones."""
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
87
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available

# Lazy-module bootstrap for GroupViT: configuration symbols are always
# importable, model symbols are added only when the backend is installed.
#
# NOTE(review): the import-structure dict and both backend symbol lists are
# all bound to the same machine-mangled name ``UpperCamelCase`` (each binding
# clobbers the previous one), and the final ``_LazyModule`` call reads
# ``_import_structure``, which is never defined here.  Upstream this is one
# ``_import_structure`` dict that the backend branches extend - restore the
# names before using this module.
UpperCamelCase = {
    '''configuration_groupvit''': [
        '''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
        '''GroupViTConfig''',
        '''GroupViTOnnxConfig''',
        '''GroupViTTextConfig''',
        '''GroupViTVisionConfig''',
    ],
}

# PyTorch models are only registered when torch is available.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = [
        '''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''GroupViTModel''',
        '''GroupViTPreTrainedModel''',
        '''GroupViTTextModel''',
        '''GroupViTVisionModel''',
    ]

# TensorFlow models are only registered when TF is available.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    UpperCamelCase = [
        '''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''TFGroupViTModel''',
        '''TFGroupViTPreTrainedModel''',
        '''TFGroupViTTextModel''',
        '''TFGroupViTVisionModel''',
    ]

if TYPE_CHECKING:
    # Static imports for type checkers only; mirrors the structure above.
    from .configuration_groupvit import (
        GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        GroupViTConfig,
        GroupViTOnnxConfig,
        GroupViTTextConfig,
        GroupViTVisionConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_groupvit import (
            GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            GroupViTModel,
            GroupViTPreTrainedModel,
            GroupViTTextModel,
            GroupViTVisionModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_groupvit import (
            TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFGroupViTModel,
            TFGroupViTPreTrainedModel,
            TFGroupViTTextModel,
            TFGroupViTVisionModel,
        )

else:
    import sys

    # At runtime, replace this module with a lazily-importing proxy.
    UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
87
1
def solution(n: int = 2000000) -> int:
    """Project Euler 10: return the sum of all primes strictly below ``n``.

    Uses a sieve of Eratosthenes where ``primality_list[i] == 0`` means
    "still considered prime" and ``1`` means "known non-prime".

    Fixes applied: the body referenced an undefined ``n`` while the parameter
    had been machine-renamed, and the three subscript assignments that mark
    0, 1 and each composite as non-prime had been mangled into plain
    variable assignments.
    """
    primality_list = [0 for i in range(n + 1)]
    # 0 and 1 are not prime.
    primality_list[0] = 1
    primality_list[1] = 1
    for i in range(2, int(n**0.5) + 1):
        if primality_list[i] == 0:
            # cross off every multiple of the prime i, starting at i*i
            for j in range(i * i, n + 1, i):
                primality_list[j] = 1
    sum_of_primes = 0
    for i in range(n):
        if primality_list[i] == 0:
            sum_of_primes += i
    return sum_of_primes


# Backward-compatible alias: the function was machine-renamed to
# ``lowercase_`` while the __main__ guard below still calls ``solution``.
lowercase_ = solution


if __name__ == "__main__":
    print(f"{solution() = }")
87
import io
import json

import fsspec
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


# NOTE(review): identifiers in this test module appear machine-mangled: every
# function is named ``lowercase_`` with duplicated ``_lowerCamelCase``
# parameters (invalid Python), and locals are bound to ``lowercase__`` while
# later lines still read the original fixture names (``dataset``,
# ``jsonl_path``, ``tmp_path`` ...).  The comments give the apparent intent of
# each test; names must be restored against upstream before this can run.
def lowercase_(_lowerCamelCase: Optional[Any], _lowerCamelCase: int):
    # _check_json_dataset: shared shape/schema assertions for a loaded Dataset.
    assert isinstance(_lowerCamelCase, _lowerCamelCase)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def lowercase_(_lowerCamelCase: List[Any], _lowerCamelCase: Any, _lowerCamelCase: str):
    # Read a Dataset from a jsonl path, with and without keep_in_memory.
    lowercase__: Optional[int] = tmp_path / "cache"
    lowercase__: str = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        lowercase__: Union[str, Any] = JsonDatasetReader(_lowerCamelCase, cache_dir=_lowerCamelCase, keep_in_memory=_lowerCamelCase).read()
    _check_json_dataset(_lowerCamelCase, _lowerCamelCase)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def lowercase_(_lowerCamelCase: List[str], _lowerCamelCase: Dict, _lowerCamelCase: Dict):
    # Read a Dataset with an explicit ``features`` schema (or None).
    lowercase__: List[Any] = tmp_path / "cache"
    lowercase__: Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    lowercase__: List[Any] = features.copy() if features else default_expected_features
    lowercase__: List[Any] = (
        Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
    )
    lowercase__: Any = JsonDatasetReader(_lowerCamelCase, features=_lowerCamelCase, cache_dir=_lowerCamelCase).read()
    _check_json_dataset(_lowerCamelCase, _lowerCamelCase)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_3": "float64", "col_1": "string", "col_2": "int64"},
    ],
)
def lowercase_(_lowerCamelCase: Any, _lowerCamelCase: Any, _lowerCamelCase: List[str]):
    # Column order from ``features`` takes precedence over the file's order.
    lowercase__: Optional[Any] = tmp_path / "cache"
    lowercase__: Tuple = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    lowercase__: List[Any] = features.copy() if features else default_expected_features
    lowercase__: int = (
        Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
    )
    lowercase__: Any = JsonDatasetReader(_lowerCamelCase, features=_lowerCamelCase, cache_dir=_lowerCamelCase).read()
    assert isinstance(_lowerCamelCase, _lowerCamelCase)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_3", "col_1", "col_2"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


def lowercase_(_lowerCamelCase: List[Any], _lowerCamelCase: Optional[int]):
    # jsonl_312_path features are {"col_3": "float64", "col_1": "string", "col_2": "int64"}
    lowercase__: Any = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
    lowercase__: str = features.copy()
    lowercase__: str = (
        Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None
    )
    lowercase__: Optional[int] = tmp_path / "cache"
    lowercase__: Any = JsonDatasetReader(_lowerCamelCase, features=_lowerCamelCase, cache_dir=_lowerCamelCase).read()
    assert isinstance(_lowerCamelCase, _lowerCamelCase)
    assert dataset.num_rows == 2
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_2", "col_3", "col_1"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"]) def lowercase_ ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str]): lowercase__ : Union[str, Any] = tmp_path / "cache" lowercase__ : List[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} lowercase__ : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , split=_lowerCamelCase).read() _check_json_dataset(_lowerCamelCase , _lowerCamelCase) assert dataset.split == split if split else "train" @pytest.mark.parametrize("path_type" , [str, list]) def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : int): if issubclass(_lowerCamelCase , _lowerCamelCase): lowercase__ : Tuple = jsonl_path elif issubclass(_lowerCamelCase , _lowerCamelCase): lowercase__ : str = [jsonl_path] lowercase__ : str = tmp_path / "cache" lowercase__ : Optional[Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} lowercase__ : Tuple = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase).read() _check_json_dataset(_lowerCamelCase , _lowerCamelCase) def lowercase_ ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int]=("train",)): assert isinstance(_lowerCamelCase , _lowerCamelCase) for split in splits: lowercase__ : Optional[Any] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("keep_in_memory" , [False, True]) def lowercase_ ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : str , _lowerCamelCase : str): lowercase__ : List[str] = tmp_path / "cache" lowercase__ : str = {"col_1": "string", "col_2": "int64", "col_3": "float64"} with assert_arrow_memory_increases() if keep_in_memory else 
assert_arrow_memory_doesnt_increase(): lowercase__ : Optional[Any] = JsonDatasetReader({"train": jsonl_path} , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase).read() _check_json_datasetdict(_lowerCamelCase , _lowerCamelCase) @pytest.mark.parametrize( "features" , [ None, {"col_1": "string", "col_2": "int64", "col_3": "float64"}, {"col_1": "string", "col_2": "string", "col_3": "string"}, {"col_1": "int32", "col_2": "int32", "col_3": "int32"}, {"col_1": "float32", "col_2": "float32", "col_3": "float32"}, ] , ) def lowercase_ ( _lowerCamelCase : Any , _lowerCamelCase : List[str] , _lowerCamelCase : List[str]): lowercase__ : str = tmp_path / "cache" lowercase__ : Tuple = {"col_1": "string", "col_2": "int64", "col_3": "float64"} lowercase__ : Tuple = features.copy() if features else default_expected_features lowercase__ : Union[str, Any] = ( Features({feature: Value(_lowerCamelCase) for feature, dtype in features.items()}) if features is not None else None ) lowercase__ : Tuple = JsonDatasetReader({"train": jsonl_path} , features=_lowerCamelCase , cache_dir=_lowerCamelCase).read() _check_json_datasetdict(_lowerCamelCase , _lowerCamelCase) @pytest.mark.parametrize("split" , [None, NamedSplit("train"), "train", "test"]) def lowercase_ ( _lowerCamelCase : str , _lowerCamelCase : Dict , _lowerCamelCase : Tuple): if split: lowercase__ : Tuple = {split: jsonl_path} else: lowercase__ : Tuple = "train" lowercase__ : int = {"train": jsonl_path, "test": jsonl_path} lowercase__ : Dict = tmp_path / "cache" lowercase__ : Union[str, Any] = {"col_1": "string", "col_2": "int64", "col_3": "float64"} lowercase__ : Union[str, Any] = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase).read() _check_json_datasetdict(_lowerCamelCase , _lowerCamelCase , splits=list(path.keys())) assert all(dataset[split].split == split for split in path.keys()) def lowercase_ ( _lowerCamelCase : Union[str, Any]): return json.load(_lowerCamelCase) def lowercase_ ( _lowerCamelCase : 
Optional[int]): return [json.loads(_lowerCamelCase) for line in buffer] class snake_case_ : @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] ) def __UpperCamelCase ( self : List[Any] , lowercase_ : Any , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Optional[Any]: with io.BytesIO() as buffer: JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ ).write() buffer.seek(0 ) lowercase__ : Optional[int] = load_json_function(lowercase_ ) assert isinstance(lowercase_ , lowercase_ ) assert isinstance(exported_content[0] , lowercase_ ) assert len(lowercase_ ) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at" , [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789" ), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ] , ) def __UpperCamelCase ( self : str , lowercase_ : int , lowercase_ : str , lowercase_ : Union[str, Any] , lowercase_ : Dict , lowercase_ : Tuple ) -> List[str]: with io.BytesIO() as buffer: JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ ).write() buffer.seek(0 ) lowercase__ : str = load_json(lowercase_ ) assert isinstance(lowercase_ , lowercase_ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(lowercase_ ) == 10 @pytest.mark.parametrize("lines, load_json_function" , [(True, load_json_lines), (False, load_json)] ) def __UpperCamelCase ( self : List[Any] , lowercase_ : int , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Optional[int]: with io.BytesIO() as buffer: JsonDatasetWriter(lowercase_ , lowercase_ , 
lines=lowercase_ , num_proc=2 ).write() buffer.seek(0 ) lowercase__ : str = load_json_function(lowercase_ ) assert isinstance(lowercase_ , lowercase_ ) assert isinstance(exported_content[0] , lowercase_ ) assert len(lowercase_ ) == 10 @pytest.mark.parametrize( "orient, container, keys, len_at" , [ ("records", list, {"tokens", "labels", "answers", "id"}, None), ("split", dict, {"columns", "data"}, "data"), ("index", dict, set("0123456789" ), None), ("columns", dict, {"tokens", "labels", "answers", "id"}, "tokens"), ("values", list, None, None), ("table", dict, {"schema", "data"}, "data"), ] , ) def __UpperCamelCase ( self : Union[str, Any] , lowercase_ : Dict , lowercase_ : Dict , lowercase_ : Optional[int] , lowercase_ : Optional[Any] , lowercase_ : Dict ) -> Any: with io.BytesIO() as buffer: JsonDatasetWriter(lowercase_ , lowercase_ , lines=lowercase_ , orient=lowercase_ , num_proc=2 ).write() buffer.seek(0 ) lowercase__ : Optional[Any] = load_json(lowercase_ ) assert isinstance(lowercase_ , lowercase_ ) if keys: if container is dict: assert exported_content.keys() == keys else: assert exported_content[0].keys() == keys else: assert not hasattr(lowercase_ , "keys" ) and not hasattr(exported_content[0] , "keys" ) if len_at: assert len(exported_content[len_at] ) == 10 else: assert len(lowercase_ ) == 10 def __UpperCamelCase ( self : Dict , lowercase_ : List[str] ) -> str: with pytest.raises(lowercase_ ): with io.BytesIO() as buffer: JsonDatasetWriter(lowercase_ , lowercase_ , num_proc=0 ) @pytest.mark.parametrize("compression, extension" , [("gzip", "gz"), ("bz2", "bz2"), ("xz", "xz")] ) def __UpperCamelCase ( self : List[Any] , lowercase_ : Tuple , lowercase_ : List[str] , lowercase_ : Optional[Any] , lowercase_ : str , lowercase_ : List[Any] ) -> Any: lowercase__ : Dict = tmp_path_factory.mktemp("data" ) / F'''test.json.{extension}''' lowercase__ : Optional[int] = str(shared_datadir / F'''test_file.json.{extension}''' ) JsonDatasetWriter(lowercase_ , lowercase_ , 
compression=lowercase_ ).write() with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f: lowercase__ : List[Any] = f.read() with fsspec.open(lowercase_ , "rb" , compression="infer" ) as f: lowercase__ : str = f.read() assert exported_content == original_content
87
1
"""Tests for reading/writing `datasets` objects from/to Parquet.

Relies on conftest fixtures: `parquet_path`, `dataset`, `shared_datadir`,
plus pytest's built-in `tmp_path`.
"""
import pyarrow.parquet as pq
import pytest

from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_parquet_dataset(dataset, expected_features):
    """Assert `dataset` is the canonical 4-row x 3-column fixture with the expected dtypes."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_parquet_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    """Reading with keep_in_memory=True must grow Arrow memory; False must not."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_dataset_from_parquet_features(features, parquet_path, tmp_path):
    """An explicit `features` schema must override the types stored in the file."""
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader(parquet_path, features=features, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_parquet_split(split, parquet_path, tmp_path):
    """`split=None` defaults to the "train" split."""
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(parquet_path, cache_dir=cache_dir, split=split).read()
    _check_parquet_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_parquet_path_type(path_type, parquet_path, tmp_path):
    """The reader accepts both a single path and a list of paths."""
    if issubclass(path_type, str):
        path = parquet_path
    elif issubclass(path_type, list):
        path = [parquet_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_dataset(dataset, expected_features)


def _check_parquet_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Assert each requested split of `dataset_dict` matches the canonical fixture."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_parquet_datasetdict_reader_keep_in_memory(keep_in_memory, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = ParquetDatasetReader(
            {"train": parquet_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory
        ).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"col_1": "string", "col_2": "int64", "col_3": "float64"},
        {"col_1": "string", "col_2": "string", "col_3": "string"},
        {"col_1": "int32", "col_2": "int32", "col_3": "int32"},
        {"col_1": "float32", "col_2": "float32", "col_3": "float32"},
    ],
)
def test_parquet_datasetdict_reader_features(features, parquet_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = ParquetDatasetReader({"train": parquet_path}, features=features, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_parquet_datasetdict_reader_split(split, parquet_path, tmp_path):
    """A dict of paths produces one dataset per key; `None` means both train and test."""
    if split:
        path = {split: parquet_path}
    else:
        split = "train"
        path = {"train": parquet_path, "test": parquet_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
    dataset = ParquetDatasetReader(path, cache_dir=cache_dir).read()
    _check_parquet_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())


def test_parquet_write(dataset, tmp_path):
    """Writing the fixture produces a Parquet file whose table equals the dataset's table."""
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0
    pf = pq.ParquetFile(tmp_path / "foo.parquet")
    output_table = pf.read()
    assert dataset.data.table == output_table


def test_dataset_to_parquet_keeps_features(shared_datadir, tmp_path):
    """Feature types (e.g. Image) must survive a Parquet round trip, streaming or not."""
    image_path = str(shared_datadir / "test_image_rgb.jpg")
    data = {"image": [image_path]}
    features = Features({"image": Image()})
    dataset = Dataset.from_dict(data, features=features)
    writer = ParquetDatasetWriter(dataset, tmp_path / "foo.parquet")
    assert writer.write() > 0

    reloaded_dataset = Dataset.from_parquet(str(tmp_path / "foo.parquet"))
    assert dataset.features == reloaded_dataset.features

    reloaded_iterable_dataset = ParquetDatasetReader(str(tmp_path / "foo.parquet"), streaming=True).read()
    assert dataset.features == reloaded_iterable_dataset.features


@pytest.mark.parametrize(
    "feature, expected",
    [
        (Features({"foo": Value("int32")}), None),
        (Features({"image": Image(), "foo": Value("int32")}), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
        (Features({"nested": Sequence(Audio())}), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
    ],
)
def test_get_writer_batch_size(feature, expected):
    """Media-bearing schemas get a smaller row-group size; plain schemas get None."""
    assert get_writer_batch_size(feature) == expected
87
"""Processor class for LayoutLMv3: combines an image processor (with optional OCR)
and a tokenizer into a single callable."""
import warnings
from typing import List, Optional, Union

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType


class LayoutLMv3Processor(ProcessorMixin):
    """
    Constructs a LayoutLMv3 processor which wraps a LayoutLMv3 image processor and a
    LayoutLMv3 tokenizer into a single processor.

    When the image processor runs OCR (`apply_ocr=True`), the words and boxes it
    extracts are forwarded to the tokenizer; otherwise the caller must supply
    `text`/`boxes` themselves.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            # Backward compatibility: `feature_extractor` was renamed to `image_processor`.
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        """Run the image processor on `images`, then tokenize either the caller's text
        or the OCR'd words/boxes, and attach the pixel values to the encoding."""
        # verify input: OCR mode is incompatible with caller-supplied boxes / word labels
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features["boxes"],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            # duplicate images so each overflowed sample keeps its source image
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """Map each overflowed `input_ids` sample back to its corresponding image."""
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        # Fixed set for LayoutLMv3 models.
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
87
1
"""Tests for the PyTorch MobileNetV2 model (config, modeling, and slow integration)."""
import inspect
import unittest

from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
    from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetVaImageProcessor


class MobileNetVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        """MobileNetV2 configs must expose their architecture-specific attributes."""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))


class MobileNetVaModelTester:
    """Builds small configs/inputs and runs shape checks for the three model heads."""

    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        # Without finegrained output the head width scales with the depth multiplier.
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetVaConfig(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(
            result.pooler_output.shape,
            (self.batch_size, self.last_hidden_size),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class MobileNetVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Overrides some common tests because MobileNetV2 uses no input_ids, inputs_embeds,
    attention_mask or seq_length.
    """

    all_model_classes = (
        (MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetVaModel,
            "image-classification": MobileNetVaForImageClassification,
            "image-segmentation": MobileNetVaForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    test_attention_outputs = False

    def setUp(self):
        self.model_tester = MobileNetVaModelTester(self)
        self.config_tester = MobileNetVaConfigTester(self, config_class=MobileNetVaConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    """Load the standard COCO cats test image used by the integration tests."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class MobileNetVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetVaImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetVaForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetVaForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)

        image_processor = MobileNetVaImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )

        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)


if is_vision_available():
    import PIL


# NOTE(review): the class and parameter names in this file had been machine-mangled
# (class `snake_case_`, every parameter named `lowercase_`, which is a SyntaxError).
# They are restored here to the conventional names the body's logic implies.
class CLIPImageProcessor(BaseImageProcessor):
    """Image processor for CLIP-style models.

    Optionally converts to RGB, resizes the shortest edge, center-crops,
    rescales to [0, 1], and normalizes with the OpenAI CLIP mean/std.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # `size` describes the shortest edge only, so it must not be squared;
        # `crop_size` is a full (height, width) box.
        size = size if size is not None else {"shortest_edge": 2_24}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 2_24, "width": 2_24}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` so its shortest edge equals `size["shortest_edge"]`,
        preserving aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop `image` to `size["height"]` x `size["width"]`."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''')
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by `scale` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize `image` channel-wise with `mean` and `std`."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Apply the full preprocessing pipeline to one image or a batch.

        Any argument left as None falls back to the value configured at
        construction time. Returns a `BatchFeature` with "pixel_values".
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
87
1
import sys
from collections import deque
from typing import Generic, TypeVar

T = TypeVar("T")


class LRUCache(Generic[T]):
    """Least-recently-used cache backed by a deque (ordering) and a set
    (O(1) membership). The most recently used key is at the left end.

    Fixes relative to the previous revision:
    - the class and helper names had been mangled, leaving `LRUCache`, `T`
      and `lru_cache` undefined;
    - the capacity check compared against the *class* attribute (always 10)
      instead of the per-instance capacity set in `__init__`, so eviction
      never happened for caches of any other size.
    """

    dq_store: "deque[T]"  # order of keys, most recent at the left
    key_reference: "set[T]"  # fast membership test for cached keys
    _MAX_CAPACITY: int = 10  # class-level default capacity

    def __init__(self, n: int) -> None:
        """Create a cache holding at most `n` keys (0 means unbounded)."""
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            self._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            self._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        """Access key `x`: insert it (evicting the LRU key if full) or move
        it to the most-recently-used position."""
        if x not in self.key_reference:
            # Use the instance capacity, not the class attribute.
            if len(self.dq_store) == self._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)
        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        """Print all cached keys, most recently used first."""
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return F'''LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}'''


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    lru_cache: LRUCache[str | int] = LRUCache(4)
    lru_cache.refer("A")
    lru_cache.refer(2)
    lru_cache.refer(3)
    lru_cache.refer("A")
    lru_cache.refer(4)
    lru_cache.refer(5)
    lru_cache.display()

    print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
87
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available


# Lazy import structure for the GPT-SW3 tokenizer. The previous revision
# bound both the dict and the entry list to the same mangled name, leaving
# `_import_structure` (used by `_LazyModule` below) undefined.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # sentencepiece is optional; without it the tokenizer is simply not exposed.
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]


if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
87
1
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

# Smallest candidate considered when searching for a primitive root.
min_primitive_root = 3


def primitive_root(p_val: int):
    """Return a random primitive root modulo the prime `p_val`.

    Candidates g with g^2 == 1 or g^p == 1 (mod p) are rejected.
    """
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int):
    """Generate an ElGamal key pair of `key_size` bits.

    Returns (public_key, private_key) where public_key = (key_size, e_1, e_2, p)
    and private_key = (key_size, d). The previous revision collapsed the
    intermediate names, leaving `e_a`/`public_key`/`private_key` undefined.
    """
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    """Write `<name>_pubkey.txt` and `<name>_privkey.txt`; abort if they exist."""
    if os.path.exists(F'''{name}_pubkey.txt''') or os.path.exists(F'''{name}_privkey.txt'''):
        print("\nWARNING:")
        print(
            F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(F'''\nWriting public key to file {name}_pubkey.txt...''')
    with open(F'''{name}_pubkey.txt''', "w") as fo:
        fo.write(F'''{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}''')
    print(F'''Writing private key to file {name}_privkey.txt...''')
    with open(F'''{name}_privkey.txt''', "w") as fo:
        fo.write(F'''{private_key[0]},{private_key[1]}''')


def main() -> None:
    """Generate a 2048-bit ElGamal key pair on disk."""
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
87
EVEN_DIGITS = [0, 2, 4, 6, 8]
ODD_DIGITS = [1, 3, 5, 7, 9]


def reversible_numbers(remaining_length: int, remainder: int, digits: list[int], length: int) -> int:
    """Count `length`-digit numbers n such that every digit of n + reverse(n)
    is odd (Project Euler 145), filling `digits` from the outside inwards.

    Fix: the previous revision discarded each chosen digit into a throwaway
    local instead of writing it into `digits`, so every branch eventually saw
    digits[0] == 0 and the count was always 0.
    """
    if remaining_length == 0:
        # All digits placed: reject leading/trailing zero, then propagate the
        # carry from the outermost pair inwards, requiring each resulting
        # digit of n + reverse(n) to be odd.
        if digits[0] == 0 or digits[-1] == 0:
            return 0

        for i in range(length // 2 - 1, -1, -1):
            remainder += digits[i] + digits[length - i - 1]
            if remainder % 2 == 0:
                return 0
            remainder //= 10
        return 1

    if remaining_length == 1:
        # Middle digit of an odd-length number: 2 * digit + carry must be odd.
        if remainder % 2 == 0:
            return 0

        result = 0
        for digit in range(10):
            digits[length // 2] = digit  # write the middle digit in place
            result += reversible_numbers(0, (remainder + 2 * digit) // 10, digits, length)
        return result

    result = 0
    for digita in range(10):
        digits[(length + remaining_length) // 2 - 1] = digita  # right-hand digit of the pair
        # Choose the partner from the opposite parity class so the pair sum
        # (plus incoming carry) is odd.
        if (remainder + digita) % 2 == 0:
            other_parity_digits = ODD_DIGITS
        else:
            other_parity_digits = EVEN_DIGITS

        for digitb in other_parity_digits:
            digits[(length - remaining_length) // 2] = digitb  # left-hand digit of the pair
            result += reversible_numbers(
                remaining_length - 2,
                (remainder + digita + digitb) // 10,
                digits,
                length,
            )
    return result


def solution(max_power: int = 9) -> int:
    """Return how many reversible numbers there are below 10**max_power.

    Fix: the previous revision passed `max_power` instead of `length` as the
    remaining-length argument for every iteration.
    """
    result = 0
    for length in range(1, max_power + 1):
        result += reversible_numbers(length, 0, [0] * length, length)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
87
1
# Self-training loop for text classification: alternates pseudo-labeling on
# unlabeled data with fine-tuning, tracking the best iteration by an
# evaluation metric.
#
# NOTE(review): identifiers in this module appear machine-mangled — all three
# dataclasses are named `snake_case_` with fields named `__A`, assignment
# targets are `lowercase__`, and `def` signatures repeat the parameter name
# `_lowerCamelCase` (a SyntaxError). Later code refers to names that are
# never bound here (`STModelArguments`, `args`, `data_files`, `dataset`,
# `accelerator`, ...). The code is preserved byte-for-byte below; only
# comments are added. Restore the original identifiers before running.
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional

import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm

import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy


# NOTE(review): these two bindings presumably were `logger` and a checkpoint
# filename constant; the second assignment overwrites the first.
UpperCamelCase = logging.getLogger(__name__)
UpperCamelCase = '''pytorch_model.bin'''


# Model-selection arguments (pretrained model path and cache dir).
@dataclasses.dataclass
class snake_case_ :
    __A : str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    __A : Optional[str] = dataclasses.field(
        default=__A, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )


# Data arguments (train/infer/eval files, task name, label list).
@dataclasses.dataclass
class snake_case_ :
    __A : str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    __A : str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    __A : Optional[str] = dataclasses.field(
        default=__A, metadata={"help": "A csv or a json file containing the validation data."}
    )
    __A : Optional[str] = dataclasses.field(
        default=__A, metadata={"help": "The name of the task to train on."},
    )
    __A : Optional[List[str]] = dataclasses.field(
        default=__A, metadata={"help": "The list of labels for the task."}
    )


# Training arguments (output dir, eval metric/strategy, early stopping,
# pseudo-label filtering options).
@dataclasses.dataclass
class snake_case_ :
    __A : str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    __A : Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    __A : Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
        },
    )
    __A : Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    __A : Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    __A : Optional[bool] = dataclasses.field(
        default=__A, metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    __A : Optional[bool] = dataclasses.field(
        default=__A, metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    __A : Optional[bool] = dataclasses.field(
        default=__A, metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    __A : Optional[float] = dataclasses.field(
        default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."},
    )
    __A : Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    __A : Optional[int] = dataclasses.field(
        default=__A, metadata={"help": "Random seed for initialization."},
    )


# Build the pseudo-labeled training file for the next iteration: join model
# predictions onto the inference inputs, optionally filter by confidence or by
# validation performance, relabel, shuffle, and write train_pseudo.<ext>.
# NOTE(review): duplicate `_lowerCamelCase` parameters — originally distinct
# names (args, infer_input, infer_output, eval_result, id2label, next_data_dir).
def lowercase_ ( _lowerCamelCase : Tuple , _lowerCamelCase : List[str] , _lowerCamelCase : Tuple , _lowerCamelCase : str , _lowerCamelCase : Any , _lowerCamelCase : List[Any]):
    lowercase__ : Optional[int] = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        # keep only predictions above the configured confidence threshold
        lowercase__ : Dict = dataset.filter(lambda _lowerCamelCase: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        # keep the top fraction of predictions, sized by the eval score
        lowercase__ : int = int(eval_result * len(_lowerCamelCase))
        print(_lowerCamelCase)
        lowercase__ : str = dataset.sort("probability", reverse=_lowerCamelCase)
        lowercase__ : str = dataset.select(range(_lowerCamelCase))

    lowercase__ : Optional[Any] = dataset.remove_columns(["label", "probability"])
    lowercase__ : Any = dataset.rename_column("prediction", "label")
    lowercase__ : Any = dataset.map(lambda _lowerCamelCase: {"label": idalabel[example["label"]]})
    lowercase__ : List[Any] = dataset.shuffle(seed=args.seed)

    lowercase__ : List[str] = os.path.join(_lowerCamelCase, f'''train_pseudo.{args.data_file_extension}''')
    if args.data_file_extension == "csv":
        dataset.to_csv(_lowerCamelCase, index=_lowerCamelCase)
    else:
        dataset.to_json(_lowerCamelCase)


# Main self-training driver.
# NOTE(review): duplicate `_lowerCamelCase` parameters — originally
# (model_name_or_path, train_file, infer_file, output_dir, **kwargs).
def lowercase_ ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any , _lowerCamelCase : Tuple , _lowerCamelCase : Optional[int] , **_lowerCamelCase : str):
    lowercase__ : List[Any] = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    # Merge the three argument dataclasses plus **kwargs into one namespace.
    lowercase__ : Optional[int] = STModelArguments(model_name_or_path=_lowerCamelCase)
    lowercase__ : Dict = STDataArguments(train_file=_lowerCamelCase, infer_file=_lowerCamelCase)
    lowercase__ : List[str] = STTrainingArguments(output_dir=_lowerCamelCase)
    lowercase__ : Union[str, Any] = argparse.Namespace()

    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(_lowerCamelCase).items():
            setattr(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase)

    for key, value in kwargs.items():
        if hasattr(_lowerCamelCase, _lowerCamelCase):
            setattr(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase)

    # Sanity checks
    lowercase__ : Optional[int] = {}
    lowercase__ : List[Any] = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    lowercase__ : int = args.train_file
    lowercase__ : Tuple = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        lowercase__ : List[Any] = args.eval_file

    for key in data_files:
        lowercase__ : str = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f'''`{key}_file` should be a csv or a json file.'''
        if args.data_file_extension is None:
            lowercase__ : int = extension
        else:
            assert extension == args.data_file_extension, f'''`{key}_file` should be a {args.data_file_extension} file`.'''

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f'''{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.'''

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    lowercase__ : Optional[int] = f'''{args.output_dir}/self-train_iter-{{}}'''.format
    lowercase__ : str = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=_lowerCamelCase)
        os.makedirs(_lowerCamelCase, exist_ok=_lowerCamelCase)
    accelerator.wait_for_everyone()

    # Best-iteration bookkeeping for early stopping.
    lowercase__ : Dict = None
    lowercase__ : Union[str, Any] = None
    lowercase__ : Union[str, Any] = 0
    lowercase__ : Tuple = False

    # Show the progress bar
    lowercase__ : Optional[int] = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        lowercase__ : List[str] = data_dir_format(_lowerCamelCase)
        assert os.path.exists(_lowerCamelCase)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        lowercase__ : Dict = os.path.join(_lowerCamelCase, "stage-1")
        lowercase__ : List[str] = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(_lowerCamelCase, _lowerCamelCase):
                arguments_dict.update({key: value})

        lowercase__ : Optional[Any] = os.path.join(_lowerCamelCase, "best-checkpoint", _lowerCamelCase)
        if os.path.exists(_lowerCamelCase):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                _lowerCamelCase,
                _lowerCamelCase,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", _lowerCamelCase)
            finetune(**_lowerCamelCase)
            accelerator.wait_for_everyone()
            assert os.path.exists(_lowerCamelCase)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", _lowerCamelCase)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            lowercase__ : Tuple = os.path.join(_lowerCamelCase, "best-checkpoint")
            lowercase__ : str = os.path.join(_lowerCamelCase, "stage-2")
            # Update arguments_dict
            lowercase__ : int = model_path
            lowercase__ : List[str] = data_files["train"]
            lowercase__ : List[str] = current_output_dir

            lowercase__ : str = os.path.join(_lowerCamelCase, "best-checkpoint", _lowerCamelCase)
            if os.path.exists(_lowerCamelCase):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    _lowerCamelCase,
                    _lowerCamelCase,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", _lowerCamelCase)
                finetune(**_lowerCamelCase)
                accelerator.wait_for_everyone()
                assert os.path.exists(_lowerCamelCase)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", _lowerCamelCase)

        lowercase__ : Any = iteration
        lowercase__ : List[str] = data_dir_format(iteration + 1)

        # Load the best checkpoint's config and evaluation result for this iteration.
        lowercase__ : Optional[Any] = AutoConfig.from_pretrained(os.path.join(_lowerCamelCase, "best-checkpoint"))
        lowercase__ : Optional[Any] = config.idalabel
        lowercase__ : Optional[int] = os.path.join(_lowerCamelCase, "eval_results_best-checkpoint.json")
        lowercase__ : Dict = os.path.join(_lowerCamelCase, "test_results_best-checkpoint.json")
        assert os.path.exists(_lowerCamelCase)

        with open(_lowerCamelCase, "r") as f:
            lowercase__ : Dict = float(json.load(_lowerCamelCase)[args.eval_metric])

        lowercase__ : Any = os.path.join(_lowerCamelCase, "infer_output_best-checkpoint.csv")
        assert os.path.exists(_lowerCamelCase)

        # Loading the dataset from local csv or json files.
        lowercase__ : List[Any] = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        lowercase__ : Tuple = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(_lowerCamelCase, exist_ok=_lowerCamelCase)
            shutil.copy(_lowerCamelCase, os.path.join(_lowerCamelCase, f'''eval_results_iter-{iteration}.json'''))
            if os.path.exists(_lowerCamelCase):
                shutil.copy(_lowerCamelCase, os.path.join(_lowerCamelCase, f'''test_results_iter-{iteration}.json'''))
            create_pseudo_labeled_data(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase, _lowerCamelCase)
        accelerator.wait_for_everyone()

        lowercase__ : Dict = os.path.join(_lowerCamelCase, f'''train_pseudo.{args.data_file_extension}''')

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            # Early-stopping logic: track the best iteration by eval metric.
            lowercase__ : Union[str, Any] = eval_result

            if best_iteration is None:
                lowercase__ : Union[str, Any] = new_iteration
                lowercase__ : List[Any] = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    lowercase__ : Dict = new_iteration
                    lowercase__ : Any = new_eval_result
                    lowercase__ : str = 0
                else:
                    if new_eval_result == best_eval_result:
                        lowercase__ : str = new_iteration
                        lowercase__ : List[Any] = new_eval_result
                    early_stopping_patience_counter += 1

            if early_stopping_patience_counter >= args.early_stopping_patience:
                lowercase__ : Dict = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", _lowerCamelCase)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, _lowerCamelCase)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(_lowerCamelCase, f'''eval_results_iter-{iteration}.json'''),
                os.path.join(_lowerCamelCase, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, _lowerCamelCase)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(_lowerCamelCase, f'''eval_results_iter-{args.max_selftrain_iterations - 1}.json'''),
                os.path.join(_lowerCamelCase, "eval_results_best-iteration.json"),
            )
87
# TER (Translation Edit Rate) metric wrapper around sacrebleu's TER scorer.
#
# NOTE(review): several names here appear machine-mangled — the three string
# constants are all bound to `UpperCamelCase` but referenced below as
# `_CITATION`/`_DESCRIPTION`/`_KWARGS_DESCRIPTION` (undefined), both methods
# are named `__UpperCamelCase` (originally presumably `_info`/`_compute`),
# assignments go to `lowercase__`, and the second method repeats the parameter
# name `lowercase_`. Code is preserved byte-for-byte; only comments added.
import sacrebleu as scb
from packaging import version
from sacrebleu import TER

import datasets


UpperCamelCase = '''\
@inproceedings{snover-etal-2006-study,
    title = "A Study of Translation Edit Rate with Targeted Human Annotation",
    author = "Snover, Matthew  and
      Dorr, Bonnie  and
      Schwartz, Rich  and
      Micciulla, Linnea  and
      Makhoul, John",
    booktitle = "Proceedings of the 7th Conference of the Association for Machine Translation in the Americas: Technical Papers",
    month = aug # " 8-12",
    year = "2006",
    address = "Cambridge, Massachusetts, USA",
    publisher = "Association for Machine Translation in the Americas",
    url = "https://aclanthology.org/2006.amta-papers.25",
    pages = "223--231",
}
@inproceedings{post-2018-call,
    title = "A Call for Clarity in Reporting {BLEU} Scores",
    author = "Post, Matt",
    booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",
    month = oct,
    year = "2018",
    address = "Belgium, Brussels",
    publisher = "Association for Computational Linguistics",
    url = "https://www.aclweb.org/anthology/W18-6319",
    pages = "186--191",
}
'''

UpperCamelCase = '''\
TER (Translation Edit Rate, also called Translation Error Rate) is a metric to quantify the edit operations that a
hypothesis requires to match a reference translation. We use the implementation that is already present in sacrebleu
(https://github.com/mjpost/sacreBLEU#ter), which in turn is inspired by the TERCOM implementation, which can be found
here: https://github.com/jhclark/tercom.

The implementation here is slightly different from sacrebleu in terms of the required input format. The length of
the references and hypotheses lists need to be the same, so you may need to transpose your references compared to
sacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534

See the README.md file at https://github.com/mjpost/sacreBLEU#ter for more information.
'''

UpperCamelCase = '''
Produces TER scores alongside the number of edits and reference length.

Args:
    predictions (list of str): The system stream (a sequence of segments).
    references (list of list of str): A list of one or more reference streams (each a sequence of segments).
    normalized (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    ignore_punct (boolean): If `True`, applies basic tokenization and normalization to sentences. Defaults to `False`.
    support_zh_ja_chars (boolean): If `True`, tokenization/normalization supports processing of Chinese characters,
        as well as Japanese Kanji, Hiragana, Katakana, and Phonetic Extensions of Katakana.
        Only applies if `normalized = True`. Defaults to `False`.
    case_sensitive (boolean): If `False`, makes all predictions and references lowercase to ignore differences in case. Defaults to `False`.

Returns:
    'score' (float): TER score (num_edits / sum_ref_lengths * 100)
    'num_edits' (int): The cumulative number of edits
    'ref_length' (float): The cumulative average reference length

Examples:
    Example 1:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?",
        ...                     "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...             ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 150.0, 'num_edits': 15, 'ref_length': 10.0}

    Example 2:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 62.5, 'num_edits': 5, 'ref_length': 8.0}

    Example 3:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         normalized=True,
        ...                         case_sensitive=True)
        >>> print(results)
        {'score': 57.14285714285714, 'num_edits': 6, 'ref_length': 10.5}

    Example 4:
        >>> predictions = ["does this sentence match??",
        ...                     "what about this sentence?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {'score': 0.0, 'num_edits': 0, 'ref_length': 8.0}

    Example 5:
        >>> predictions = ["does this sentence match??",
        ...                    "what about this sentence?",
        ...                    "What did the TER metric user say to the developer?"]
        >>> references = [["does this sentence match", "does this sentence match!?!"],
        ...             ["wHaT aBoUt ThIs SeNtEnCe?", "wHaT aBoUt ThIs SeNtEnCe?"],
        ...             ["Your jokes are...", "...TERrible"]]
        >>> ter = datasets.load_metric("ter")
        >>> results = ter.compute(predictions=predictions,
        ...                         references=references,
        ...                         ignore_punct=True,
        ...                         case_sensitive=False)
        >>> print(results)
        {'score': 100.0, 'num_edits': 10, 'ref_length': 10.0}
'''


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class snake_case_ ( datasets.Metric ):
    # Metric metadata: requires sacrebleu >= 1.4.12 for TER support.
    def __UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
        if version.parse(scb.__version__) < version.parse("1.4.12"):
            raise ImportWarning(
                "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
                "You can install it with `pip install \"sacrebleu>=1.4.12\"`."
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="http://www.cs.umd.edu/~snover/tercom/",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Sequence(datasets.Value("string", id="sequence"), id="references"),
                }
            ),
            codebase_urls=["https://github.com/mjpost/sacreBLEU#ter"],
            reference_urls=[
                "https://github.com/jhclark/tercom",
            ],
        )

    # Compute TER: transpose references to sacrebleu's layout, score the
    # corpus, and return score / num_edits / ref_length.
    # NOTE(review): duplicate `lowercase_` parameters — originally presumably
    # (predictions, references, normalized, ignore_punct,
    # support_zh_ja_chars, case_sensitive).
    def __UpperCamelCase ( self : Optional[Any] , lowercase_ : str , lowercase_ : Optional[int] , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , lowercase_ : bool = False , ) -> Any:
        # sacrebleu needs the same number of references for every prediction
        lowercase__ : Optional[int] = len(references[0])
        if any(len(lowercase_) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # transpose: list-of-refs-per-prediction -> list-of-streams
        lowercase__ : Union[str, Any] = [[refs[i] for refs in references] for i in range(lowercase_)]
        lowercase__ : str = TER(
            normalized=lowercase_,
            no_punct=lowercase_,
            asian_support=lowercase_,
            case_sensitive=lowercase_,
        )
        lowercase__ : List[str] = sb_ter.corpus_score(lowercase_, lowercase_)
        return {"score": output.score, "num_edits": output.num_edits, "ref_length": output.ref_length}
87
1
import argparse
import json
import subprocess


def list_str(values):
    """Parse a comma-separated CLI argument into a list of strings."""
    return values.split(",")


def get_runner_status(target_runners, token):
    """Check the status of the given GitHub Actions self-hosted runners.

    Queries the GitHub API for the repository's runners, writes any targeted
    runners that are offline to ``offline_runners.txt`` (so the result can be
    reported on Slack), and raises if at least one is offline.

    Args:
        target_runners: Runner names to check.
        token: GitHub token with ``actions:read`` permission.

    Raises:
        ValueError: If at least one targeted runner is offline.
    """
    offline_runners = []
    cmd = (
        f'curl -H "Accept: application/vnd.github+json" -H "Authorization: Bearer {token}"'
        " https://api.github.com/repos/huggingface/transformers/actions/runners"
    )
    output = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
    status = json.loads(output.stdout.decode("utf-8"))
    for runner in status["runners"]:
        if runner["name"] in target_runners and runner["status"] == "offline":
            offline_runners.append(runner)

    # Save the result so we can report it on Slack.
    with open("offline_runners.txt", "w") as fp:
        fp.write(json.dumps(offline_runners))

    if offline_runners:
        failed = "\n".join(x["name"] for x in offline_runners)
        raise ValueError(f"The following runners are offline:\n{failed}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--target_runners",
        default=None,
        type=list_str,
        required=True,
        help="Comma-separated list of runners to check status.",
    )
    parser.add_argument(
        "--token",
        default=None,
        type=str,
        required=True,
        help="A token that has actions:read permission.",
    )
    args = parser.parse_args()
    get_runner_status(args.target_runners, args.token)
87
def perfect_cube(n: int) -> bool:
    """Return True if ``n`` is a perfect cube (including 0 and negative cubes).

    The previous implementation compared ``(n ** (1/3)) ** 3 == n`` with
    floats, which fails for most true cubes due to rounding (e.g. 27) and
    for every negative input (a fractional power of a negative number is
    complex in Python 3). Here the float cube root only seeds an integer
    candidate and the final check uses exact integer arithmetic.
    """
    magnitude = abs(n)
    # The float cube root may be off by one for large inputs, so probe
    # the rounded root and its neighbours.
    root = round(magnitude ** (1 / 3))
    return any(
        candidate >= 0 and candidate ** 3 == magnitude
        for candidate in (root - 1, root, root + 1)
    )


if __name__ == "__main__":
    print(perfect_cube(27))
    print(perfect_cube(4))
87
1
from __future__ import annotations

import requests

# Post fields that may be requested via ``wanted_data``.
valid_terms = set(
    """approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)


def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch posts from a subreddit via Reddit's public JSON endpoint.

    Args:
        subreddit: Name of the subreddit to query.
        limit: Number of posts to fetch.
        age: Sort order of the listing ("new", "top", "hot", ...).
        wanted_data: Fields to extract from each post; every entry must be
            one of ``valid_terms``. When empty, the raw post entries are
            returned instead.

    Returns:
        Mapping of post index to either the raw post entry or the requested
        subset of its ``data`` fields.

    Raises:
        ValueError: If ``wanted_data`` contains an unknown field name.
        requests.HTTPError: If Reddit rate-limits the request (HTTP 429).
    """
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict


if __name__ == "__main__":
    # If you get Error 429, that means you are rate limited.Try after some time
    print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
87
from PIL import Image


def change_contrast(img: Image, level: int) -> Image:
    """Return a copy of ``img`` with its contrast adjusted by ``level``.

    Uses the standard contrast-correction factor
    ``F = 259 * (level + 255) / (255 * (259 - level))`` and remaps every
    pixel value ``c`` to ``128 + F * (c - 128)``: level 0 leaves pixels
    unchanged, positive levels increase contrast, negative levels reduce it.
    """
    factor = (259 * (level + 255)) / (255 * (259 - level))

    def contrast(c: int) -> int:
        # Shift the pixel away from (or towards) the mid-point 128.
        return int(128 + factor * (c - 128))

    return img.point(contrast)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change contrast to 170
        cont_img = change_contrast(img, 170)
        cont_img.save("image_data/lena_high_contrast.png", format="png")
87
1