code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
"""simple docstring""" import os import time import warnings from dataclasses import dataclass, field from enum import Enum from typing import List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import logging from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors from ..processors.utils import InputFeatures a_ = logging.get_logger(__name__) @dataclass class UpperCAmelCase_ : UpperCamelCase =field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} ) UpperCamelCase =field( metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} ) UpperCamelCase =field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) UpperCamelCase =field( default=snake_case , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : List[str] = self.task_name.lower() class UpperCAmelCase_ ( snake_case ): UpperCamelCase ="train" UpperCamelCase ="dev" UpperCamelCase ="test" class UpperCAmelCase_ ( snake_case ): UpperCamelCase =42 UpperCamelCase =42 UpperCamelCase =42 def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = Split.train , UpperCamelCase_ = None , ) -> List[str]: warnings.warn( '''This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets ''' '''library. 
You can have a look at this example script for pointers: ''' '''https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py''' , UpperCamelCase_ , ) __lowercase : Optional[int] = args __lowercase : Optional[int] = glue_processors[args.task_name]() __lowercase : List[str] = glue_output_modes[args.task_name] if isinstance(UpperCamelCase_ , UpperCamelCase_ ): try: __lowercase : Union[str, Any] = Split[mode] except KeyError: raise KeyError('''mode is not a valid split name''' ) # Load data features from cache or dataset file __lowercase : Union[str, Any] = os.path.join( cache_dir if cache_dir is not None else args.data_dir , F"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}""" , ) __lowercase : Optional[int] = self.processor.get_labels() if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in ( "RobertaTokenizer", "RobertaTokenizerFast", "XLMRobertaTokenizer", "BartTokenizer", "BartTokenizerFast", ): # HACK(label indices are swapped in RoBERTa pretrained model) __lowercase ,__lowercase : str = label_list[2], label_list[1] __lowercase : Optional[int] = label_list # Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. 
__lowercase : List[Any] = cached_features_file + '''.lock''' with FileLock(UpperCamelCase_ ): if os.path.exists(UpperCamelCase_ ) and not args.overwrite_cache: __lowercase : List[Any] = time.time() __lowercase : Dict = torch.load(UpperCamelCase_ ) logger.info( F"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start ) else: logger.info(F"""Creating features from dataset file at {args.data_dir}""" ) if mode == Split.dev: __lowercase : Dict = self.processor.get_dev_examples(args.data_dir ) elif mode == Split.test: __lowercase : Tuple = self.processor.get_test_examples(args.data_dir ) else: __lowercase : int = self.processor.get_train_examples(args.data_dir ) if limit_length is not None: __lowercase : str = examples[:limit_length] __lowercase : Optional[int] = glue_convert_examples_to_features( UpperCamelCase_ , UpperCamelCase_ , max_length=args.max_seq_length , label_list=UpperCamelCase_ , output_mode=self.output_mode , ) __lowercase : List[Any] = time.time() torch.save(self.features , UpperCamelCase_ ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( F"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""" ) def __len__( self ) -> str: return len(self.features ) def __getitem__( self , UpperCamelCase_ ) -> InputFeatures: return self.features[i] def _lowerCamelCase ( self ) -> Optional[int]: return self.label_list
76
"""simple docstring""" from math import pi, sqrt, tan def __UpperCAmelCase ( __UpperCamelCase ): if side_length < 0: raise ValueError('''surface_area_cube() only accepts non-negative values''' ) return 6 * side_length**2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if length < 0 or breadth < 0 or height < 0: raise ValueError('''surface_area_cuboid() only accepts non-negative values''' ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def __UpperCAmelCase ( __UpperCamelCase ): if radius < 0: raise ValueError('''surface_area_sphere() only accepts non-negative values''' ) return 4 * pi * radius**2 def __UpperCAmelCase ( __UpperCamelCase ): if radius < 0: raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' ) return 3 * pi * radius**2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if radius < 0 or height < 0: raise ValueError('''surface_area_cone() only accepts non-negative values''' ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( '''surface_area_conical_frustum() only accepts non-negative values''' ) __lowercase : List[str] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if radius < 0 or height < 0: raise ValueError('''surface_area_cylinder() only accepts non-negative values''' ) return 2 * pi * radius * (height + radius) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if torus_radius < 0 or tube_radius < 0: raise ValueError('''surface_area_torus() only accepts non-negative values''' ) if torus_radius < tube_radius: raise ValueError( '''surface_area_torus() does not support spindle or self intersecting tori''' ) return 4 * pow(__UpperCamelCase , 2 ) 
* torus_radius * tube_radius def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if length < 0 or width < 0: raise ValueError('''area_rectangle() only accepts non-negative values''' ) return length * width def __UpperCAmelCase ( __UpperCamelCase ): if side_length < 0: raise ValueError('''area_square() only accepts non-negative values''' ) return side_length**2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if base < 0 or height < 0: raise ValueError('''area_triangle() only accepts non-negative values''' ) return (base * height) / 2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError('''Given three sides do not form a triangle''' ) __lowercase : int = (sidea + sidea + sidea) / 2 __lowercase : List[Any] = sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if base < 0 or height < 0: raise ValueError('''area_parallelogram() only accepts non-negative values''' ) return base * height def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if basea < 0 or basea < 0 or height < 0: raise ValueError('''area_trapezium() only accepts non-negative values''' ) return 1 / 2 * (basea + basea) * height def __UpperCAmelCase ( __UpperCamelCase ): if radius < 0: raise ValueError('''area_circle() only accepts non-negative values''' ) return pi * radius**2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if radius_x < 0 or radius_y < 0: raise ValueError('''area_ellipse() only accepts non-negative values''' ) return pi * radius_x * radius_y def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if diagonal_a < 0 or diagonal_a < 0: raise 
ValueError('''area_rhombus() only accepts non-negative values''' ) return 1 / 2 * diagonal_a * diagonal_a def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if not isinstance(__UpperCamelCase , __UpperCamelCase ) or sides < 3: raise ValueError( '''area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides''' ) elif length < 0: raise ValueError( '''area_reg_polygon() only accepts non-negative values as \ length of a side''' ) return (sides * length**2) / (4 * tan(pi / sides )) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print('[DEMO] Areas of various geometric shapes: \n') print(F"Rectangle: {area_rectangle(1_0, 2_0) = }") print(F"Square: {area_square(1_0) = }") print(F"Triangle: {area_triangle(1_0, 1_0) = }") print(F"Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }") print(F"Parallelogram: {area_parallelogram(1_0, 2_0) = }") print(F"Rhombus: {area_rhombus(1_0, 2_0) = }") print(F"Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }") print(F"Circle: {area_circle(2_0) = }") print(F"Ellipse: {area_ellipse(1_0, 2_0) = }") print('\nSurface Areas of various geometric shapes: \n') print(F"Cube: {surface_area_cube(2_0) = }") print(F"Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }") print(F"Sphere: {surface_area_sphere(2_0) = }") print(F"Hemisphere: {surface_area_hemisphere(2_0) = }") print(F"Cone: {surface_area_cone(1_0, 2_0) = }") print(F"Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }") print(F"Cylinder: {surface_area_cylinder(1_0, 2_0) = }") print(F"Torus: {surface_area_torus(2_0, 1_0) = }") print(F"Equilateral Triangle: {area_reg_polygon(3, 1_0) = }") print(F"Square: {area_reg_polygon(4, 1_0) = }") print(F"Reqular Pentagon: {area_reg_polygon(5, 1_0) = }")
76
1
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : int = '''''' for ch in key: if ch == " " or ch not in key_no_dups and ch.isalpha(): key_no_dups += ch return key_no_dups def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : str = [chr(i + 65 ) for i in range(26 )] # Remove duplicate characters from key __lowercase : str = remove_duplicates(key.upper() ) __lowercase : Any = len(__UpperCamelCase ) # First fill cipher with key characters __lowercase : Any = {alphabet[i]: char for i, char in enumerate(__UpperCamelCase )} # Then map remaining characters in alphabet to # the alphabet from the beginning for i in range(len(__UpperCamelCase ) , 26 ): __lowercase : Optional[Any] = alphabet[i - offset] # Ensure we are not mapping letters to letters previously mapped while char in key: offset -= 1 __lowercase : int = alphabet[i - offset] __lowercase : int = char return cipher_alphabet def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): return "".join(cipher_map.get(__UpperCamelCase , __UpperCamelCase ) for ch in message.upper() ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): __lowercase : Any = {v: k for k, v in cipher_map.items()} return "".join(rev_cipher_map.get(__UpperCamelCase , __UpperCamelCase ) for ch in message.upper() ) def __UpperCAmelCase ( ): __lowercase : Optional[int] = input('''Enter message to encode or decode: ''' ).strip() __lowercase : Dict = input('''Enter keyword: ''' ).strip() __lowercase : Tuple = input('''Encipher or decipher? E/D:''' ).strip()[0].lower() try: __lowercase : List[Any] = {'''e''': encipher, '''d''': decipher}[option] except KeyError: raise KeyError('''invalid input option''' ) __lowercase : int = create_cipher_map(__UpperCamelCase ) print(func(__UpperCamelCase , __UpperCamelCase ) ) if __name__ == "__main__": import doctest doctest.testmod() main()
76
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): # noqa: E741 while r - l > 1: __lowercase : int = (l + r) // 2 if v[m] >= key: __lowercase : Any = m else: __lowercase : List[Any] = m # noqa: E741 return r def __UpperCAmelCase ( __UpperCamelCase ): if len(__UpperCamelCase ) == 0: return 0 __lowercase : List[str] = [0] * len(__UpperCamelCase ) __lowercase : Any = 1 __lowercase : Dict = v[0] for i in range(1 , len(__UpperCamelCase ) ): if v[i] < tail[0]: __lowercase : Tuple = v[i] elif v[i] > tail[length - 1]: __lowercase : Optional[Any] = v[i] length += 1 else: __lowercase : Dict = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
76
1
"""simple docstring""" import numpy class UpperCAmelCase_ : def __init__( self , UpperCamelCase_ , UpperCamelCase_ ) -> None: __lowercase : Any = input_array # Random initial weights are assigned where first argument is the # number of nodes in previous layer and second argument is the # number of nodes in the next layer. # Random initial weights are assigned. # self.input_array.shape[1] is used to represent number of nodes in input layer. # First hidden layer consists of 4 nodes. __lowercase : str = numpy.random.rand( self.input_array.shape[1] , 4 ) # Random initial values for the first hidden layer. # First hidden layer has 4 nodes. # Second hidden layer has 3 nodes. __lowercase : Union[str, Any] = numpy.random.rand( 4 , 3 ) # Random initial values for the second hidden layer. # Second hidden layer has 3 nodes. # Output layer has 1 node. __lowercase : Dict = numpy.random.rand(3 , 1 ) # Real output values provided. __lowercase : List[Any] = output_array # Predicted output values by the neural network. # Predicted_output array initially consists of zeroes. __lowercase : int = numpy.zeros(output_array.shape ) def _lowerCamelCase ( self ) -> numpy.ndarray: __lowercase : Union[str, Any] = sigmoid( numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) ) # layer_between_first_hidden_layer_and_second_hidden_layer is the layer # connecting the first hidden set of nodes with the second hidden set of nodes. __lowercase : str = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) # layer_between_second_hidden_layer_and_output is the layer connecting # second hidden layer with the output node. 
__lowercase : str = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return self.layer_between_second_hidden_layer_and_output def _lowerCamelCase ( self ) -> None: __lowercase : List[Any] = numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , ) __lowercase : Union[str, Any] = numpy.dot( self.layer_between_input_and_first_hidden_layer.T , numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , ) __lowercase : Union[str, Any] = numpy.dot( self.input_array.T , numpy.dot( numpy.dot( 2 * (self.output_array - self.predicted_output) * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , ) * sigmoid_derivative( self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , ) * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , ) self.input_layer_and_first_hidden_layer_weights += ( updated_input_layer_and_first_hidden_layer_weights ) self.first_hidden_layer_and_second_hidden_layer_weights += ( updated_first_hidden_layer_and_second_hidden_layer_weights ) self.second_hidden_layer_and_output_layer_weights += ( updated_second_hidden_layer_and_output_layer_weights ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> None: for iteration in range(1 , iterations + 1 ): __lowercase : Optional[int] = self.feedforward() self.back_propagation() if give_loss: __lowercase : Tuple = numpy.mean(numpy.square(output - self.feedforward() ) ) print(F"""Iteration {iteration} Loss: {loss}""" ) def _lowerCamelCase ( self , UpperCamelCase_ 
) -> int: __lowercase : List[Any] = input_arr __lowercase : List[Any] = sigmoid( numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) ) __lowercase : Tuple = sigmoid( numpy.dot( self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) ) __lowercase : Any = sigmoid( numpy.dot( self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) ) return int(self.layer_between_second_hidden_layer_and_output > 0.6 ) def __UpperCAmelCase ( __UpperCamelCase ): return 1 / (1 + numpy.exp(-value )) def __UpperCAmelCase ( __UpperCamelCase ): return (value) * (1 - (value)) def __UpperCAmelCase ( ): __lowercase : List[str] = numpy.array( ( [0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0], [1, 1, 1], ) , dtype=numpy.floataa , ) # True output values for the given input values. __lowercase : Optional[Any] = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa ) # Calling neural network class. __lowercase : Optional[Any] = TwoHiddenLayerNeuralNetwork( input_array=__UpperCamelCase , output_array=__UpperCamelCase ) # Calling training function. # Set give_loss to True if you want to see loss in every iteration. neural_network.train(output=__UpperCamelCase , iterations=10 , give_loss=__UpperCamelCase ) return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) ) if __name__ == "__main__": example()
76
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( __UpperCamelCase = 4 ): __lowercase : Dict = abs(__UpperCamelCase ) or 4 return [[1 + x + y * row_size for x in range(__UpperCamelCase )] for y in range(__UpperCamelCase )] def __UpperCAmelCase ( __UpperCamelCase ): return reverse_row(transpose(__UpperCamelCase ) ) # OR.. transpose(reverse_column(matrix)) def __UpperCAmelCase ( __UpperCamelCase ): return reverse_row(reverse_column(__UpperCamelCase ) ) # OR.. reverse_column(reverse_row(matrix)) def __UpperCAmelCase ( __UpperCamelCase ): return reverse_column(transpose(__UpperCamelCase ) ) # OR.. transpose(reverse_row(matrix)) def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Dict = [list(__UpperCamelCase ) for x in zip(*__UpperCamelCase )] return matrix def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Union[str, Any] = matrix[::-1] return matrix def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Dict = [x[::-1] for x in matrix] return matrix def __UpperCAmelCase ( __UpperCamelCase ): for i in matrix: print(*__UpperCamelCase ) if __name__ == "__main__": a_ = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 90 counterclockwise:\n') print_matrix(rotate_aa(matrix)) a_ = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 180:\n') print_matrix(rotate_aaa(matrix)) a_ = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 270 counterclockwise:\n') print_matrix(rotate_aaa(matrix))
76
1
"""simple docstring""" import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer a_ = logging.get_logger(__name__) a_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} a_ = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } a_ = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } a_ = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } a_ = { 'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2, 'facebook/dpr-ctx_encoder-multiset-base': 5_1_2, } a_ = { 'facebook/dpr-question_encoder-single-nq-base': 5_1_2, 'facebook/dpr-question_encoder-multiset-base': 5_1_2, } a_ = { 'facebook/dpr-reader-single-nq-base': 5_1_2, 'facebook/dpr-reader-multiset-base': 5_1_2, } a_ = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } a_ = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } a_ = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION a_ = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text'] ) a_ = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) a_ = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the 
tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. 
This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(snake_case ) class UpperCAmelCase_ : def __call__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> BatchEncoding: if titles is None and texts is None: return super().__call__( UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) elif titles is None or texts is None: __lowercase : int = titles if texts is None else texts return super().__call__( UpperCamelCase_ , UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) __lowercase : Optional[int] = titles if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [titles] __lowercase : Optional[int] = texts if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [texts] __lowercase : str = len(UpperCamelCase_ ) __lowercase : List[Any] = questions if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [questions] * n_passages if len(UpperCamelCase_ ) != len(UpperCamelCase_ ): raise ValueError( F"""There should be as many titles than texts but got {len(UpperCamelCase_ )} titles and {len(UpperCamelCase_ )} texts.""" ) __lowercase : int = super().__call__(UpperCamelCase_ , UpperCamelCase_ , 
padding=UpperCamelCase_ , truncation=UpperCamelCase_ )['''input_ids'''] __lowercase : List[Any] = super().__call__(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ )['''input_ids'''] __lowercase : Optional[Any] = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(UpperCamelCase_ , UpperCamelCase_ ) ] } if return_attention_mask is not False: __lowercase : str = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __lowercase : List[str] = attention_mask return self.pad(UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 16 , UpperCamelCase_ = 64 , UpperCamelCase_ = 4 , ) -> List[DPRSpanPrediction]: __lowercase : List[Any] = reader_input['''input_ids'''] __lowercase ,__lowercase ,__lowercase : List[str] = reader_output[:3] __lowercase : Optional[int] = len(UpperCamelCase_ ) __lowercase : Any = sorted(range(UpperCamelCase_ ) , reverse=UpperCamelCase_ , key=relevance_logits.__getitem__ ) __lowercase : List[DPRReaderOutput] = [] for doc_id in sorted_docs: __lowercase : Any = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __lowercase : Tuple = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __lowercase : Optional[Any] = sequence_ids.index(self.pad_token_id ) else: __lowercase : List[Any] = len(UpperCamelCase_ ) __lowercase : List[str] = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCamelCase_ , 
top_spans=UpperCamelCase_ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCamelCase_ , start_index=UpperCamelCase_ , end_index=UpperCamelCase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(UpperCamelCase_ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> List[DPRSpanPrediction]: __lowercase : Tuple = [] for start_index, start_score in enumerate(UpperCamelCase_ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __lowercase : int = sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : x[1] , reverse=UpperCamelCase_ ) __lowercase : Optional[Any] = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" ) __lowercase : Any = end_index - start_index + 1 if length > max_answer_length: raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(UpperCamelCase_ ) == top_spans: break return chosen_span_intervals @add_end_docstrings(snake_case ) class UpperCAmelCase_ ( snake_case , snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =READER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase 
=READER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase =["input_ids", "attention_mask"]
76
"""simple docstring""" import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer a_ = logging.get_logger(__name__) a_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} a_ = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } a_ = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } a_ = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } a_ = { 'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2, 'facebook/dpr-ctx_encoder-multiset-base': 5_1_2, } a_ = { 'facebook/dpr-question_encoder-single-nq-base': 5_1_2, 'facebook/dpr-question_encoder-multiset-base': 5_1_2, } a_ = { 'facebook/dpr-reader-single-nq-base': 5_1_2, 'facebook/dpr-reader-multiset-base': 5_1_2, } a_ = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } a_ = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } a_ = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION a_ = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text'] ) a_ = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) a_ = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the 
tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. 
This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(snake_case ) class UpperCAmelCase_ : def __call__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> BatchEncoding: if titles is None and texts is None: return super().__call__( UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) elif titles is None or texts is None: __lowercase : int = titles if texts is None else texts return super().__call__( UpperCamelCase_ , UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) __lowercase : Optional[int] = titles if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [titles] __lowercase : Optional[int] = texts if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [texts] __lowercase : str = len(UpperCamelCase_ ) __lowercase : List[Any] = questions if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [questions] * n_passages if len(UpperCamelCase_ ) != len(UpperCamelCase_ ): raise ValueError( F"""There should be as many titles than texts but got {len(UpperCamelCase_ )} titles and {len(UpperCamelCase_ )} texts.""" ) __lowercase : int = super().__call__(UpperCamelCase_ , UpperCamelCase_ , 
padding=UpperCamelCase_ , truncation=UpperCamelCase_ )['''input_ids'''] __lowercase : List[Any] = super().__call__(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ )['''input_ids'''] __lowercase : Optional[Any] = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(UpperCamelCase_ , UpperCamelCase_ ) ] } if return_attention_mask is not False: __lowercase : str = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __lowercase : List[str] = attention_mask return self.pad(UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 16 , UpperCamelCase_ = 64 , UpperCamelCase_ = 4 , ) -> List[DPRSpanPrediction]: __lowercase : List[Any] = reader_input['''input_ids'''] __lowercase ,__lowercase ,__lowercase : List[str] = reader_output[:3] __lowercase : Optional[int] = len(UpperCamelCase_ ) __lowercase : Any = sorted(range(UpperCamelCase_ ) , reverse=UpperCamelCase_ , key=relevance_logits.__getitem__ ) __lowercase : List[DPRReaderOutput] = [] for doc_id in sorted_docs: __lowercase : Any = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __lowercase : Tuple = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __lowercase : Optional[Any] = sequence_ids.index(self.pad_token_id ) else: __lowercase : List[Any] = len(UpperCamelCase_ ) __lowercase : List[str] = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCamelCase_ , 
top_spans=UpperCamelCase_ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCamelCase_ , start_index=UpperCamelCase_ , end_index=UpperCamelCase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(UpperCamelCase_ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> List[DPRSpanPrediction]: __lowercase : Tuple = [] for start_index, start_score in enumerate(UpperCamelCase_ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __lowercase : int = sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : x[1] , reverse=UpperCamelCase_ ) __lowercase : Optional[Any] = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" ) __lowercase : Any = end_index - start_index + 1 if length > max_answer_length: raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(UpperCamelCase_ ) == top_spans: break return chosen_span_intervals @add_end_docstrings(snake_case ) class UpperCAmelCase_ ( snake_case , snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =READER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase 
=READER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase =["input_ids", "attention_mask"]
76
1
"""simple docstring""" from manim import * class UpperCAmelCase_ ( snake_case ): def _lowerCamelCase ( self ) -> List[str]: __lowercase : Tuple = Rectangle(height=0.5 , width=0.5 ) __lowercase : Tuple = Rectangle(height=0.2_5 , width=0.2_5 ) __lowercase : List[Any] = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0 ) __lowercase : Optional[Any] = [mem.copy() for i in range(6 )] __lowercase : Optional[int] = [mem.copy() for i in range(6 )] __lowercase : Dict = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 ) __lowercase : str = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 ) __lowercase : List[str] = VGroup(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 ) __lowercase : Any = Text('''CPU''' , font_size=24 ) __lowercase : Tuple = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(UpperCamelCase_ ) __lowercase : Optional[Any] = [mem.copy() for i in range(4 )] __lowercase : List[Any] = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 ) __lowercase : Optional[Any] = Text('''GPU''' , font_size=24 ) __lowercase : Dict = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ ) gpu.move_to([-1, -1, 0] ) self.add(UpperCamelCase_ ) __lowercase : Any = [mem.copy() for i in range(6 )] __lowercase : Dict = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 ) __lowercase : Dict = Text('''Model''' , font_size=24 ) __lowercase : List[str] = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ ) model.move_to([3, -1.0, 0] ) self.add(UpperCamelCase_ ) __lowercase : int = [] __lowercase : Dict = [] __lowercase : Optional[Any] = [] for i, rect in enumerate(UpperCamelCase_ ): rect.set_stroke(UpperCamelCase_ ) __lowercase : List[str] = Rectangle(height=0.4_6 / 4 , width=0.4_6 / 3 ).set_stroke(width=0.0 
).set_fill(UpperCamelCase_ , opacity=0.7 ) if i == 0: cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.0_2 , direction=UpperCamelCase_ ) cpu_target.set_x(cpu_target.get_x() + 0.1 ) elif i == 3: cpu_target.next_to(model_cpu_arr[0] , direction=UpperCamelCase_ , buff=0.0 ) else: cpu_target.next_to(model_cpu_arr[i - 1] , direction=UpperCamelCase_ , buff=0.0 ) self.add(UpperCamelCase_ ) model_cpu_arr.append(UpperCamelCase_ ) self.add(*UpperCamelCase_ , *UpperCamelCase_ , *UpperCamelCase_ ) __lowercase : Optional[int] = [mem.copy() for i in range(6 )] __lowercase : List[str] = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 ) __lowercase : List[str] = Text('''Loaded Checkpoint''' , font_size=24 ) __lowercase : Dict = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ ) checkpoint.move_to([3, 0.5, 0] ) self.add(UpperCamelCase_ ) __lowercase : List[Any] = [] __lowercase : str = [] for i, rect in enumerate(UpperCamelCase_ ): __lowercase : Dict = fill.copy().set_fill(UpperCamelCase_ , opacity=0.7 ) target.move_to(UpperCamelCase_ ) ckpt_arr.append(UpperCamelCase_ ) __lowercase : Dict = target.copy() if i < 5: cpu_target.move_to(cpu_left_col_base[i + 1] ) else: cpu_target.move_to(cpu_right_col_base[i - 5] ) ckpt_cpu_arr.append(UpperCamelCase_ ) self.add(*UpperCamelCase_ , *UpperCamelCase_ ) __lowercase : str = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) __lowercase : List[Any] = MarkupText( F"""<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model""" , font_size=18 , ) key_text.move_to([-5, 2.4, 0] ) self.add(UpperCamelCase_ , UpperCamelCase_ ) __lowercase : Any = MarkupText( F"""<span fgcolor='{BLUE}'>●</span> Checkpoint""" , font_size=18 , ) blue_text.next_to(UpperCamelCase_ , DOWN * 2.4 , aligned_edge=key_text.get_left() ) self.add(UpperCamelCase_ ) __lowercase : int = MarkupText( F"""Based on the passed in configuration, weights are stored in\na variety of np.memmaps 
on disk or to a particular device.""" , font_size=24 , ) step_a.move_to([2, 2, 0] ) __lowercase : int = [meta_mem.copy() for i in range(6 )] __lowercase : Optional[int] = [meta_mem.copy() for i in range(6 )] __lowercase : int = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 ) __lowercase : Any = VGroup(*UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 ) __lowercase : List[str] = VGroup(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0 ) __lowercase : Tuple = Text('''Disk''' , font_size=24 ) __lowercase : Tuple = Group(UpperCamelCase_ , UpperCamelCase_ ).arrange(UpperCamelCase_ , buff=0.5 , aligned_edge=UpperCamelCase_ ) disk.move_to([-4.0, -1.2_5, 0] ) self.play(Write(UpperCamelCase_ , run_time=3 ) , Write(UpperCamelCase_ , run_time=1 ) , Create(UpperCamelCase_ , run_time=1 ) ) __lowercase : Tuple = [] for i, rect in enumerate(UpperCamelCase_ ): __lowercase : Any = rect.copy() target.generate_target() target.target.move_to(disk_left_col_base[i] ).scale(0.5 ) animations.append(MoveToTarget(UpperCamelCase_ , run_time=1.5 ) ) self.play(*UpperCamelCase_ ) self.play(FadeOut(UpperCamelCase_ ) ) __lowercase : List[Any] = MarkupText(F"""Then, the checkpoint is removed from memory\nthrough garbage collection.""" , font_size=24 ) step_a.move_to([2, 2, 0] ) self.play(Write(UpperCamelCase_ , run_time=3 ) ) self.play( FadeOut(UpperCamelCase_ , UpperCamelCase_ , *UpperCamelCase_ , *UpperCamelCase_ ) , ) self.wait()
76
"""simple docstring""" import warnings from ...utils import logging from .image_processing_glpn import GLPNImageProcessor a_ = logging.get_logger(__name__) class UpperCAmelCase_ ( snake_case ): def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> None: warnings.warn( '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use GLPNImageProcessor instead.''' , UpperCamelCase_ , ) super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
76
1
"""simple docstring""" from collections import deque def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Optional[Any] = len(__UpperCamelCase ) __lowercase : Optional[Any] = deque() __lowercase : Optional[int] = [False for _ in range(__UpperCamelCase )] __lowercase : List[Any] = [-1 for _ in range(__UpperCamelCase )] __lowercase : List[Any] = index_of[:] def strong_connect(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : str = index # the number when this node is seen __lowercase : Dict = index # lowest rank node reachable from here index += 1 stack.append(__UpperCamelCase ) __lowercase : Optional[int] = True for w in g[v]: if index_of[w] == -1: __lowercase : int = strong_connect(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) __lowercase : Tuple = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) elif on_stack[w]: __lowercase : List[str] = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) if lowlink_of[v] == index_of[v]: __lowercase : Optional[int] = [] __lowercase : List[Any] = stack.pop() __lowercase : Any = False component.append(__UpperCamelCase ) while w != v: __lowercase : Optional[Any] = stack.pop() __lowercase : Optional[Any] = False component.append(__UpperCamelCase ) components.append(__UpperCamelCase ) return index __lowercase : Optional[int] = [] for v in range(__UpperCamelCase ): if index_of[v] == -1: strong_connect(__UpperCamelCase , 0 , __UpperCamelCase ) return components def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): __lowercase : Union[str, Any] = [[] for _ in range(__UpperCamelCase )] for u, v in edges: g[u].append(__UpperCamelCase ) return g if __name__ == "__main__": # Test a_ = 7 a_ = [0, 0, 1, 2, 3, 3, 4, 4, 6] a_ = [1, 3, 2, 0, 1, 4, 5, 6, 5] a_ = [(u, v) for u, v in zip(source, target)] a_ = create_graph(n_vertices, edges) assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
76
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def __UpperCAmelCase ( __UpperCamelCase ): # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): __lowercase : Any = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue __lowercase : Dict = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' ) __lowercase : Dict = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' ) __lowercase : Dict = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' ) __lowercase : Tuple = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' ) __lowercase : Dict = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' ) __lowercase : Optional[int] = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' ) __lowercase : Optional[int] = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' ) __lowercase : Union[str, Any] = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' ) __lowercase : str = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' ) __lowercase : Dict = key.replace('''image_encoder.module''' , '''flava.image_model''' ) __lowercase : str = key.replace('''text_encoder.module''' , '''flava.text_model''' ) __lowercase : Dict = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' ) __lowercase : Union[str, Any] = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' ) __lowercase : List[str] = key.replace('''text_projection''' , '''flava.text_projection''' 
) __lowercase : Any = key.replace('''image_projection''' , '''flava.image_projection''' ) __lowercase : Tuple = value.float() for key, value in codebook_state_dict.items(): __lowercase : int = value return upgrade @torch.no_grad() def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None ): if config_path is not None: __lowercase : Union[str, Any] = FlavaConfig.from_pretrained(__UpperCamelCase ) else: __lowercase : Union[str, Any] = FlavaConfig() __lowercase : Any = FlavaForPreTraining(__UpperCamelCase ).eval() __lowercase : Any = convert_dalle_checkpoint(__UpperCamelCase , __UpperCamelCase , save_checkpoint=__UpperCamelCase ) if os.path.exists(__UpperCamelCase ): __lowercase : Optional[Any] = torch.load(__UpperCamelCase , map_location='''cpu''' ) else: __lowercase : List[Any] = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location='''cpu''' ) __lowercase : Optional[int] = upgrade_state_dict(__UpperCamelCase , __UpperCamelCase ) hf_model.load_state_dict(__UpperCamelCase ) __lowercase : Union[str, Any] = hf_model.state_dict() __lowercase : Optional[Any] = count_parameters(__UpperCamelCase ) __lowercase : List[Any] = count_parameters(__UpperCamelCase ) + count_parameters(__UpperCamelCase ) assert torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) hf_model.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') a_ = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, 
args.config_path)
76
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) a_ = { 'configuration_encodec': [ 'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EncodecConfig', ], 'feature_extraction_encodec': ['EncodecFeatureExtractor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST', 'EncodecModel', 'EncodecPreTrainedModel', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging a_ = logging.get_logger(__name__) class UpperCAmelCase_ ( snake_case ): UpperCamelCase =["pixel_values"] def __init__( self , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = PILImageResampling.BILINEAR , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = 1 / 2_55 , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> None: super().__init__(**UpperCamelCase_ ) __lowercase : List[str] = size if size is not None else {'''shortest_edge''': 2_56} __lowercase : Dict = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) __lowercase : Optional[Any] = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} __lowercase : Dict = get_size_dict(UpperCamelCase_ ) __lowercase : Dict = do_resize __lowercase : Optional[Any] = size __lowercase : List[Any] = resample __lowercase : Dict = do_center_crop __lowercase : Any = crop_size __lowercase : List[str] = do_rescale __lowercase : List[str] = rescale_factor __lowercase : Optional[Any] = do_normalize __lowercase : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __lowercase : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = PILImageResampling.BICUBIC , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray: __lowercase : List[Any] = 
get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) __lowercase : List[Any] = get_resize_output_image_size(UpperCamelCase_ , size=size['''shortest_edge'''] , default_to_square=UpperCamelCase_ ) return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray: __lowercase : Union[str, Any] = get_size_dict(UpperCamelCase_ ) return center_crop(UpperCamelCase_ , size=(size['''height'''], size['''width''']) , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ ) -> np.ndarray: return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray: return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = ChannelDimension.FIRST , **UpperCamelCase_ , ) -> Optional[Any]: __lowercase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize __lowercase : Tuple = size if size is not None else self.size __lowercase : Optional[Any] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) __lowercase : int = resample if 
resample is not None else self.resample __lowercase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop __lowercase : List[str] = crop_size if crop_size is not None else self.crop_size __lowercase : List[str] = get_size_dict(UpperCamelCase_ ) __lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __lowercase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor __lowercase : Dict = do_normalize if do_normalize is not None else self.do_normalize __lowercase : Tuple = image_mean if image_mean is not None else self.image_mean __lowercase : Any = image_std if image_std is not None else self.image_std __lowercase : Any = make_list_of_images(UpperCamelCase_ ) if not valid_images(UpperCamelCase_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
__lowercase : Optional[int] = [to_numpy_array(UpperCamelCase_ ) for image in images] if do_resize: __lowercase : Tuple = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images] if do_center_crop: __lowercase : Any = [self.center_crop(image=UpperCamelCase_ , size=UpperCamelCase_ ) for image in images] if do_rescale: __lowercase : str = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images] if do_normalize: __lowercase : Optional[int] = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images] __lowercase : str = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images] __lowercase : Optional[Any] = {'''pixel_values''': images} return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
76
1
"""simple docstring""" from __future__ import annotations from collections import Counter from random import random class UpperCAmelCase_ : def __init__( self ) -> Tuple: __lowercase : Tuple = {} def _lowerCamelCase ( self , UpperCamelCase_ ) -> None: __lowercase : Dict = {} def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> None: if nodea not in self.connections: self.add_node(UpperCamelCase_ ) if nodea not in self.connections: self.add_node(UpperCamelCase_ ) __lowercase : Optional[int] = probability def _lowerCamelCase ( self ) -> list[str]: return list(self.connections ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> str: __lowercase : int = 0 __lowercase : Dict = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : str = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) __lowercase : List[str] = Counter(graph.get_nodes() ) __lowercase : Tuple = start for _ in range(__UpperCamelCase ): __lowercase : Any = graph.transition(__UpperCamelCase ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
76
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if digit_amount > 0: return round(number - int(__UpperCamelCase ) , __UpperCamelCase ) return number - int(__UpperCamelCase ) if __name__ == "__main__": print(decimal_isolate(1.53, 0)) print(decimal_isolate(35.345, 1)) print(decimal_isolate(35.345, 2)) print(decimal_isolate(35.345, 3)) print(decimal_isolate(-14.789, 3)) print(decimal_isolate(0, 2)) print(decimal_isolate(-14.123, 1)) print(decimal_isolate(-14.123, 2)) print(decimal_isolate(-14.123, 3))
76
1
"""simple docstring""" import math def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase = 0 , __UpperCamelCase = 0 ): __lowercase : List[Any] = end or len(__UpperCamelCase ) for i in range(__UpperCamelCase , __UpperCamelCase ): __lowercase : Any = i __lowercase : str = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: __lowercase : Union[str, Any] = array[temp_index - 1] temp_index -= 1 __lowercase : List[str] = temp_index_value return array def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): # Max Heap __lowercase : Dict = index __lowercase : Optional[Any] = 2 * index + 1 # Left Node __lowercase : str = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: __lowercase : str = left_index if right_index < heap_size and array[largest] < array[right_index]: __lowercase : int = right_index if largest != index: __lowercase ,__lowercase : Union[str, Any] = array[largest], array[index] heapify(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : List[str] = len(__UpperCamelCase ) for i in range(n // 2 , -1 , -1 ): heapify(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) for i in range(n - 1 , 0 , -1 ): __lowercase ,__lowercase : Optional[int] = array[0], array[i] heapify(__UpperCamelCase , 0 , __UpperCamelCase ) return array def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : int = low __lowercase : Dict = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < 
array[j]: j -= 1 if i >= j: return i __lowercase ,__lowercase : str = array[j], array[i] i += 1 def __UpperCAmelCase ( __UpperCamelCase ): if len(__UpperCamelCase ) == 0: return array __lowercase : Tuple = 2 * math.ceil(math.loga(len(__UpperCamelCase ) ) ) __lowercase : Dict = 16 return intro_sort(__UpperCamelCase , 0 , len(__UpperCamelCase ) , __UpperCamelCase , __UpperCamelCase ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): while end - start > size_threshold: if max_depth == 0: return heap_sort(__UpperCamelCase ) max_depth -= 1 __lowercase : str = median_of_a(__UpperCamelCase , __UpperCamelCase , start + ((end - start) // 2) + 1 , end - 1 ) __lowercase : Dict = partition(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) intro_sort(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) __lowercase : List[str] = p return insertion_sort(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() a_ = input('Enter numbers separated by a comma : ').strip() a_ = [float(item) for item in user_input.split(',')] print(sort(unsorted))
76
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : set[int] = set() # To detect a back edge, keep track of vertices currently in the recursion stack __lowercase : set[int] = set() return any( node not in visited and depth_first_search(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) for node in graph ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): visited.add(__UpperCamelCase ) rec_stk.add(__UpperCamelCase ) for node in graph[vertex]: if node not in visited: if depth_first_search(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(__UpperCamelCase ) return False if __name__ == "__main__": from doctest import testmod testmod()
76
1
"""simple docstring""" import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": a_ = pd.read_csv('sample_data.csv', header=None) a_ = df.shape[:1][0] # If you're using some other dataset input the target column a_ = df.iloc[:, 1:2] a_ = actual_data.values.reshape(len_data, 1) a_ = MinMaxScaler().fit_transform(actual_data) a_ = 1_0 a_ = 5 a_ = 2_0 a_ = len_data - periods * look_back a_ = actual_data[:division] a_ = actual_data[division - look_back :] a_ , a_ = [], [] a_ , a_ = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) a_ = np.array(train_x) a_ = np.array(test_x) a_ = np.array([list(i.ravel()) for i in train_y]) a_ = np.array([list(i.ravel()) for i in test_y]) a_ = Sequential() model.add(LSTM(1_2_8, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(6_4, input_shape=(1_2_8, 1))) model.add(Dense(forward_days)) model.compile(loss='mean_squared_error', optimizer='adam') a_ = model.fit( x_train, y_train, epochs=1_5_0, verbose=1, shuffle=True, batch_size=4 ) a_ = model.predict(x_test)
76
"""simple docstring""" import logging import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEncoder, BertModel, BertPreTrainedModel, ) a_ = logging.getLogger(__name__) class UpperCAmelCase_ ( snake_case ): def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None ) -> Optional[Any]: __lowercase : Tuple = self.layer[current_layer](UpperCamelCase_ , UpperCamelCase_ , head_mask[current_layer] ) __lowercase : Any = layer_outputs[0] return hidden_states @add_start_docstrings( "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , snake_case , ) class UpperCAmelCase_ ( snake_case ): def __init__( self , UpperCamelCase_ ) -> int: super().__init__(UpperCamelCase_ ) __lowercase : Optional[Any] = BertEncoderWithPabee(UpperCamelCase_ ) self.init_weights() __lowercase : str = 0 __lowercase : Optional[Any] = 0 __lowercase : Optional[int] = 0 __lowercase : int = 0 def _lowerCamelCase ( self , UpperCamelCase_ ) -> Dict: __lowercase : Tuple = threshold def _lowerCamelCase ( self , UpperCamelCase_ ) -> Union[str, Any]: __lowercase : Optional[int] = patience def _lowerCamelCase ( self ) -> List[str]: __lowercase : Tuple = 0 __lowercase : Tuple = 0 def _lowerCamelCase ( self ) -> List[Any]: __lowercase : Optional[int] = self.inference_layers_num / self.inference_instances_num __lowercase : int = ( F"""*** Patience = {self.patience} Avg. 
Inference Layers = {avg_inf_layers:.2f} Speed Up =""" F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***""" ) print(UpperCamelCase_ ) @add_start_docstrings_to_model_forward(UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=False , ) -> Union[str, Any]: if input_ids is not None and inputs_embeds is not None: raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' ) elif input_ids is not None: __lowercase : Tuple = input_ids.size() elif inputs_embeds is not None: __lowercase : List[Any] = inputs_embeds.size()[:-1] else: raise ValueError('''You have to specify either input_ids or inputs_embeds''' ) __lowercase : int = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: __lowercase : Dict = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ ) if token_type_ids is None: __lowercase : int = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
__lowercase : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: __lowercase ,__lowercase ,__lowercase : Optional[int] = encoder_hidden_states.size() __lowercase : Any = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: __lowercase : List[str] = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ ) __lowercase : Tuple = self.invert_attention_mask(UpperCamelCase_ ) else: __lowercase : Tuple = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] __lowercase : Optional[int] = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers ) __lowercase : Optional[int] = self.embeddings( input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ ) __lowercase : Union[str, Any] = embedding_output if self.training: __lowercase : List[Any] = [] for i in range(self.config.num_hidden_layers ): __lowercase : str = self.encoder.adaptive_forward( UpperCamelCase_ , current_layer=UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ ) __lowercase : int = self.pooler(UpperCamelCase_ ) __lowercase : str = output_layers[i](output_dropout(UpperCamelCase_ ) ) res.append(UpperCamelCase_ ) elif self.patience == 0: # Use all layers for inference __lowercase : int = self.encoder( UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , ) __lowercase : 
Optional[Any] = self.pooler(encoder_outputs[0] ) __lowercase : int = [output_layers[self.config.num_hidden_layers - 1](UpperCamelCase_ )] else: __lowercase : Optional[int] = 0 __lowercase : Union[str, Any] = None __lowercase : int = 0 for i in range(self.config.num_hidden_layers ): calculated_layer_num += 1 __lowercase : Tuple = self.encoder.adaptive_forward( UpperCamelCase_ , current_layer=UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ ) __lowercase : Dict = self.pooler(UpperCamelCase_ ) __lowercase : Optional[int] = output_layers[i](UpperCamelCase_ ) if regression: __lowercase : Any = logits.detach() if patient_result is not None: __lowercase : List[str] = patient_result.detach() if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold: patient_counter += 1 else: __lowercase : int = 0 else: __lowercase : List[str] = logits.detach().argmax(dim=1 ) if patient_result is not None: __lowercase : Optional[Any] = patient_result.detach().argmax(dim=1 ) if (patient_result is not None) and torch.all(labels.eq(UpperCamelCase_ ) ): patient_counter += 1 else: __lowercase : Tuple = 0 __lowercase : Union[str, Any] = logits if patient_counter == self.patience: break __lowercase : Optional[int] = [patient_result] self.inference_layers_num += calculated_layer_num self.inference_instances_num += 1 return res @add_start_docstrings( "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. 
" , snake_case , ) class UpperCAmelCase_ ( snake_case ): def __init__( self , UpperCamelCase_ ) -> Optional[Any]: super().__init__(UpperCamelCase_ ) __lowercase : List[Any] = config.num_labels __lowercase : int = BertModelWithPabee(UpperCamelCase_ ) __lowercase : int = nn.Dropout(config.hidden_dropout_prob ) __lowercase : Union[str, Any] = nn.ModuleList( [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] ) self.init_weights() @add_start_docstrings_to_model_forward(UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , ) -> int: __lowercase : Union[str, Any] = self.bert( input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , ) __lowercase : List[str] = (logits[-1],) if labels is not None: __lowercase : Any = None __lowercase : Optional[int] = 0 for ix, logits_item in enumerate(UpperCamelCase_ ): if self.num_labels == 1: # We are doing regression __lowercase : Any = MSELoss() __lowercase : Any = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) ) else: __lowercase : str = CrossEntropyLoss() __lowercase : Dict = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) ) if total_loss is None: __lowercase : List[str] = loss else: total_loss += loss * (ix + 1) total_weights += ix + 1 __lowercase : Union[str, Any] = (total_loss / total_weights,) + outputs return outputs
76
1
"""simple docstring""" import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) a_ = { 'sample_size': 3_2, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 2, 'num_class_embeds': 1_0_0_0, 'block_out_channels': [3_2, 6_4], 'attention_head_dim': 8, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'scale_shift', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } a_ = { 'sample_size': 6_4, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 3, 'num_class_embeds': 1_0_0_0, 'block_out_channels': [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4], 'attention_head_dim': 6_4, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'AttnUpBlock2D', 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'scale_shift', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } a_ = { 'sample_size': 2_5_6, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 2, 'num_class_embeds': None, 'block_out_channels': [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4], 'attention_head_dim': 6_4, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'ResnetDownsampleBlock2D', 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'AttnUpBlock2D', 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', 'ResnetUpsampleBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'default', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } a_ = { 'num_train_timesteps': 4_0, 'sigma_min': 0.002, 'sigma_max': 80.0, } a_ = { 'num_train_timesteps': 2_0_1, 'sigma_min': 0.002, 'sigma_max': 80.0, } a_ = { 'num_train_timesteps': 1_5_1, 'sigma_min': 0.002, 'sigma_max': 80.0, } def __UpperCAmelCase ( __UpperCamelCase ): if 
isinstance(__UpperCamelCase , __UpperCamelCase ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError('''boolean value expected''' ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ): __lowercase : Optional[int] = checkpoint[f"""{old_prefix}.in_layers.0.weight"""] __lowercase : Union[str, Any] = checkpoint[f"""{old_prefix}.in_layers.0.bias"""] __lowercase : List[str] = checkpoint[f"""{old_prefix}.in_layers.2.weight"""] __lowercase : List[str] = checkpoint[f"""{old_prefix}.in_layers.2.bias"""] __lowercase : Tuple = checkpoint[f"""{old_prefix}.emb_layers.1.weight"""] __lowercase : str = checkpoint[f"""{old_prefix}.emb_layers.1.bias"""] __lowercase : Tuple = checkpoint[f"""{old_prefix}.out_layers.0.weight"""] __lowercase : Optional[int] = checkpoint[f"""{old_prefix}.out_layers.0.bias"""] __lowercase : Dict = checkpoint[f"""{old_prefix}.out_layers.3.weight"""] __lowercase : Tuple = checkpoint[f"""{old_prefix}.out_layers.3.bias"""] if has_skip: __lowercase : Optional[Any] = checkpoint[f"""{old_prefix}.skip_connection.weight"""] __lowercase : int = checkpoint[f"""{old_prefix}.skip_connection.bias"""] return new_checkpoint def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None ): __lowercase ,__lowercase ,__lowercase : Tuple = checkpoint[f"""{old_prefix}.qkv.weight"""].chunk(3 , dim=0 ) __lowercase ,__lowercase ,__lowercase : List[Any] = checkpoint[f"""{old_prefix}.qkv.bias"""].chunk(3 , dim=0 ) __lowercase : List[str] = checkpoint[f"""{old_prefix}.norm.weight"""] __lowercase : Tuple = checkpoint[f"""{old_prefix}.norm.bias"""] __lowercase : Tuple = weight_q.squeeze(-1 ).squeeze(-1 ) __lowercase : int = bias_q.squeeze(-1 ).squeeze(-1 ) __lowercase : int = weight_k.squeeze(-1 ).squeeze(-1 ) __lowercase : Optional[int] = 
bias_k.squeeze(-1 ).squeeze(-1 ) __lowercase : int = weight_v.squeeze(-1 ).squeeze(-1 ) __lowercase : str = bias_v.squeeze(-1 ).squeeze(-1 ) __lowercase : List[str] = ( checkpoint[f"""{old_prefix}.proj_out.weight"""].squeeze(-1 ).squeeze(-1 ) ) __lowercase : int = checkpoint[f"""{old_prefix}.proj_out.bias"""].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): __lowercase : int = torch.load(__UpperCamelCase , map_location='''cpu''' ) __lowercase : Union[str, Any] = {} __lowercase : Union[str, Any] = checkpoint['''time_embed.0.weight'''] __lowercase : List[Any] = checkpoint['''time_embed.0.bias'''] __lowercase : Dict = checkpoint['''time_embed.2.weight'''] __lowercase : Optional[int] = checkpoint['''time_embed.2.bias'''] if unet_config["num_class_embeds"] is not None: __lowercase : Optional[Any] = checkpoint['''label_emb.weight'''] __lowercase : Any = checkpoint['''input_blocks.0.0.weight'''] __lowercase : str = checkpoint['''input_blocks.0.0.bias'''] __lowercase : Dict = unet_config['''down_block_types'''] __lowercase : str = unet_config['''layers_per_block'''] __lowercase : Tuple = unet_config['''attention_head_dim'''] __lowercase : List[str] = unet_config['''block_out_channels'''] __lowercase : Tuple = 1 __lowercase : Optional[Any] = channels_list[0] for i, layer_type in enumerate(__UpperCamelCase ): __lowercase : List[Any] = channels_list[i] __lowercase : List[Any] = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(__UpperCamelCase ): __lowercase : Dict = f"""down_blocks.{i}.resnets.{j}""" __lowercase : Union[str, Any] = f"""input_blocks.{current_layer}.0""" __lowercase : List[Any] = True if j == 0 and downsample_block_has_skip else False __lowercase : Union[str, Any] = convert_resnet(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , has_skip=__UpperCamelCase ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in 
range(__UpperCamelCase ): __lowercase : str = f"""down_blocks.{i}.resnets.{j}""" __lowercase : int = f"""input_blocks.{current_layer}.0""" __lowercase : int = True if j == 0 and downsample_block_has_skip else False __lowercase : Dict = convert_resnet(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , has_skip=__UpperCamelCase ) __lowercase : Any = f"""down_blocks.{i}.attentions.{j}""" __lowercase : Dict = f"""input_blocks.{current_layer}.1""" __lowercase : Dict = convert_attention( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) current_layer += 1 if i != len(__UpperCamelCase ) - 1: __lowercase : int = f"""down_blocks.{i}.downsamplers.0""" __lowercase : Optional[int] = f"""input_blocks.{current_layer}.0""" __lowercase : int = convert_resnet(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) current_layer += 1 __lowercase : Any = current_channels # hardcoded the mid-block for now __lowercase : List[Any] = '''mid_block.resnets.0''' __lowercase : int = '''middle_block.0''' __lowercase : Optional[int] = convert_resnet(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) __lowercase : str = '''mid_block.attentions.0''' __lowercase : Any = '''middle_block.1''' __lowercase : int = convert_attention(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) __lowercase : Union[str, Any] = '''mid_block.resnets.1''' __lowercase : Union[str, Any] = '''middle_block.2''' __lowercase : int = convert_resnet(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) __lowercase : Optional[int] = 0 __lowercase : Optional[int] = unet_config['''up_block_types'''] for i, layer_type in enumerate(__UpperCamelCase ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): __lowercase : int = f"""up_blocks.{i}.resnets.{j}""" __lowercase : List[str] = f"""output_blocks.{current_layer}.0""" 
__lowercase : Tuple = convert_resnet(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , has_skip=__UpperCamelCase ) current_layer += 1 if i != len(__UpperCamelCase ) - 1: __lowercase : Union[str, Any] = f"""up_blocks.{i}.upsamplers.0""" __lowercase : Tuple = f"""output_blocks.{current_layer-1}.1""" __lowercase : List[str] = convert_resnet(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): __lowercase : Union[str, Any] = f"""up_blocks.{i}.resnets.{j}""" __lowercase : List[str] = f"""output_blocks.{current_layer}.0""" __lowercase : Any = convert_resnet(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , has_skip=__UpperCamelCase ) __lowercase : Tuple = f"""up_blocks.{i}.attentions.{j}""" __lowercase : str = f"""output_blocks.{current_layer}.1""" __lowercase : Dict = convert_attention( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) current_layer += 1 if i != len(__UpperCamelCase ) - 1: __lowercase : List[str] = f"""up_blocks.{i}.upsamplers.0""" __lowercase : Dict = f"""output_blocks.{current_layer-1}.2""" __lowercase : Dict = convert_resnet(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) __lowercase : Tuple = checkpoint['''out.0.weight'''] __lowercase : List[Any] = checkpoint['''out.0.bias'''] __lowercase : str = checkpoint['''out.2.weight'''] __lowercase : List[Any] = checkpoint['''out.2.bias'''] return new_checkpoint if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.') parser.add_argument( '--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.' 
) parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.') a_ = parser.parse_args() a_ = strabool(args.class_cond) a_ = os.path.basename(args.unet_path) print(F"Checkpoint: {ckpt_name}") # Get U-Net config if "imagenet64" in ckpt_name: a_ = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): a_ = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: a_ = TEST_UNET_CONFIG else: raise ValueError(F"Checkpoint type {ckpt_name} is not currently supported.") if not args.class_cond: a_ = None a_ = con_pt_to_diffuser(args.unet_path, unet_config) a_ = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: a_ = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: a_ = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): a_ = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(F"Checkpoint type {ckpt_name} is not currently supported.") a_ = CMStochasticIterativeScheduler(**scheduler_config) a_ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
76
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() a_ = logging.get_logger(__name__) a_ = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): for attribute in key.split('''.''' ): __lowercase : str = getattr(__UpperCamelCase , __UpperCamelCase ) if weight_type is not None: __lowercase : int = getattr(__UpperCamelCase , __UpperCamelCase ).shape else: __lowercase : int = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowercase : List[str] = value elif weight_type == "weight_g": __lowercase : Optional[Any] = value elif weight_type == "weight_v": __lowercase : Tuple = value elif weight_type == "bias": __lowercase : Dict = value else: __lowercase : Union[str, Any] = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : Tuple = [] __lowercase : Union[str, Any] = fairseq_model.state_dict() __lowercase : Optional[Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): __lowercase : Union[str, Any] = False if "conv_layers" in name: load_conv_layer( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , hf_model.config.feat_extract_norm == '''group''' , ) __lowercase : List[str] = True else: for key, mapped_key in MAPPING.items(): __lowercase : List[str] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned): __lowercase : int = True if "*" in mapped_key: __lowercase : Union[str, Any] = name.split(__UpperCamelCase )[0].split('''.''' )[-2] __lowercase : Tuple = mapped_key.replace('''*''' , __UpperCamelCase ) if "weight_g" in name: __lowercase : Tuple = '''weight_g''' elif "weight_v" in name: __lowercase : Optional[int] = '''weight_v''' elif "weight" in name: __lowercase : str = '''weight''' elif "bias" in name: __lowercase : Optional[int] = '''bias''' else: __lowercase : List[str] = None set_recursively(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) continue if not is_used: unused_weights.append(__UpperCamelCase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : List[Any] = full_name.split('''conv_layers.''' )[-1] __lowercase : str = name.split('''.''' ) __lowercase : Dict = int(items[0] ) __lowercase : Any = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == 
feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowercase : List[str] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowercase : Tuple = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) __lowercase : Union[str, Any] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowercase : Tuple = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__UpperCamelCase ) @torch.no_grad() def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=True ): if config_path is not None: __lowercase : Dict = HubertConfig.from_pretrained(__UpperCamelCase ) else: __lowercase : str = HubertConfig() if is_finetuned: if dict_path: __lowercase : Tuple = Dictionary.load(__UpperCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __lowercase : int = target_dict.pad_index __lowercase : Union[str, Any] = target_dict.bos_index __lowercase : int = target_dict.eos_index __lowercase : int = len(target_dict.symbols ) __lowercase : Dict = os.path.join(__UpperCamelCase , '''vocab.json''' ) if not os.path.isdir(__UpperCamelCase ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__UpperCamelCase ) ) return os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , __UpperCamelCase ) __lowercase : str = WavaVecaCTCTokenizer( __UpperCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__UpperCamelCase , ) __lowercase : str = True if config.feat_extract_norm == '''layer''' else False __lowercase : Any = 
WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__UpperCamelCase , return_attention_mask=__UpperCamelCase , ) __lowercase : Union[str, Any] = WavaVecaProcessor(feature_extractor=__UpperCamelCase , tokenizer=__UpperCamelCase ) processor.save_pretrained(__UpperCamelCase ) __lowercase : Optional[Any] = HubertForCTC(__UpperCamelCase ) else: __lowercase : Union[str, Any] = HubertModel(__UpperCamelCase ) if is_finetuned: __lowercase ,__lowercase ,__lowercase : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __lowercase ,__lowercase ,__lowercase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) __lowercase : Union[str, Any] = model[0].eval() recursively_load_weights(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) hf_wavavec.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) a_ = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
76
1
"""simple docstring""" from __future__ import annotations import requests def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : str = f"""https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty""" return requests.get(__UpperCamelCase ).json() def __UpperCAmelCase ( __UpperCamelCase = 10 ): __lowercase : List[str] = '''https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty''' __lowercase : Optional[int] = requests.get(__UpperCamelCase ).json()[:max_stories] return [get_hackernews_story(__UpperCamelCase ) for story_id in story_ids] def __UpperCAmelCase ( __UpperCamelCase = 10 ): __lowercase : str = hackernews_top_stories(__UpperCamelCase ) return "\n".join('''* [{title}]({url})'''.format(**__UpperCamelCase ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
76
"""simple docstring""" a_ = { 'Pillow': 'Pillow<10.0.0', 'accelerate': 'accelerate>=0.20.3', 'av': 'av==9.2.0', 'beautifulsoup4': 'beautifulsoup4', 'black': 'black~=23.1', 'codecarbon': 'codecarbon==1.2.0', 'cookiecutter': 'cookiecutter==1.7.3', 'dataclasses': 'dataclasses', 'datasets': 'datasets!=2.5.0', 'decord': 'decord==0.6.0', 'deepspeed': 'deepspeed>=0.9.3', 'diffusers': 'diffusers', 'dill': 'dill<0.3.5', 'evaluate': 'evaluate>=0.2.0', 'fairscale': 'fairscale>0.3', 'faiss-cpu': 'faiss-cpu', 'fastapi': 'fastapi', 'filelock': 'filelock', 'flax': 'flax>=0.4.1,<=0.7.0', 'ftfy': 'ftfy', 'fugashi': 'fugashi>=1.0', 'GitPython': 'GitPython<3.1.19', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0', 'importlib_metadata': 'importlib_metadata', 'ipadic': 'ipadic>=1.0.0,<2.0', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13', 'jaxlib': 'jaxlib>=0.1.65,<=0.4.13', 'jieba': 'jieba', 'kenlm': 'kenlm', 'keras-nlp': 'keras-nlp>=0.3.1', 'librosa': 'librosa', 'nltk': 'nltk', 'natten': 'natten>=0.14.6', 'numpy': 'numpy>=1.17', 'onnxconverter-common': 'onnxconverter-common', 'onnxruntime-tools': 'onnxruntime-tools>=1.4.2', 'onnxruntime': 'onnxruntime>=1.4.0', 'opencv-python': 'opencv-python', 'optuna': 'optuna', 'optax': 'optax>=0.0.8,<=0.1.4', 'packaging': 'packaging>=20.0', 'parameterized': 'parameterized', 'phonemizer': 'phonemizer', 'protobuf': 'protobuf', 'psutil': 'psutil', 'pyyaml': 'pyyaml>=5.1', 'pydantic': 'pydantic<2', 'pytest': 'pytest>=7.2.0', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'python': 'python>=3.8.0', 'ray[tune]': 'ray[tune]', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'rhoknp': 'rhoknp>=1.1.0,<1.3.1', 'rjieba': 'rjieba', 'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1', 'ruff': 'ruff>=0.0.241,<=0.0.259', 'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0', 'sacremoses': 'sacremoses', 'safetensors': 'safetensors>=0.3.1', 'sagemaker': 'sagemaker>=2.31.0', 'scikit-learn': 
'scikit-learn', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'sigopt': 'sigopt', 'starlette': 'starlette', 'sudachipy': 'sudachipy>=0.6.6', 'sudachidict_core': 'sudachidict_core>=20220729', 'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14', 'tensorflow': 'tensorflow>=2.6,<2.14', 'tensorflow-text': 'tensorflow-text<2.14', 'tf2onnx': 'tf2onnx', 'timeout-decorator': 'timeout-decorator', 'timm': 'timm', 'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14', 'torch': 'torch>=1.9,!=1.12.0', 'torchaudio': 'torchaudio', 'torchvision': 'torchvision', 'pyctcdecode': 'pyctcdecode>=0.4.0', 'tqdm': 'tqdm>=4.27', 'unidic': 'unidic>=1.0.2', 'unidic_lite': 'unidic_lite>=1.0.7', 'urllib3': 'urllib3<2.0.0', 'uvicorn': 'uvicorn', }
76
1
"""simple docstring""" # # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
# #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def __UpperCAmelCase ( *__UpperCamelCase ): with open(__UpperCamelCase , '''r''' ) as fh: fcntl.flock(__UpperCamelCase , fcntl.LOCK_EX ) try: print(*__UpperCamelCase ) finally: fcntl.flock(__UpperCamelCase , fcntl.LOCK_UN ) a_ = int(os.environ['LOCAL_RANK']) torch.cuda.set_device(local_rank) a_ = torch.device('cuda', local_rank) a_ = socket.gethostname() a_ = F"[{hostname}-{local_rank}]" try: # test distributed dist.init_process_group('nccl') dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank a_ = dist.get_rank() a_ = dist.get_world_size() printflock(F"{gpu} is OK (global rank: {rank}/{world_size})") dist.barrier() if rank == 0: printflock(F"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}") except Exception: printflock(F"{gpu} is broken") raise
76
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class UpperCAmelCase_ ( snake_case ): UpperCamelCase ="openai/whisper-base" UpperCamelCase =( "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the " "transcribed text." ) UpperCamelCase ="transcriber" UpperCamelCase =WhisperProcessor UpperCamelCase =WhisperForConditionalGeneration UpperCamelCase =["audio"] UpperCamelCase =["text"] def _lowerCamelCase ( self , UpperCamelCase_ ) -> Union[str, Any]: return self.pre_processor(UpperCamelCase_ , return_tensors='''pt''' ).input_features def _lowerCamelCase ( self , UpperCamelCase_ ) -> Optional[Any]: return self.model.generate(inputs=UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[str]: return self.pre_processor.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )[0]
76
1
"""simple docstring""" import math import flax.linen as nn import jax.numpy as jnp def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 , __UpperCamelCase = 1 , __UpperCamelCase = 1.0e4 , __UpperCamelCase = False , __UpperCamelCase = 1.0 , ): assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, f"""Embedding dimension {embedding_dim} should be even""" __lowercase : Dict = float(embedding_dim // 2 ) __lowercase : Tuple = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) __lowercase : List[Any] = min_timescale * jnp.exp(jnp.arange(__UpperCamelCase , dtype=jnp.floataa ) * -log_timescale_increment ) __lowercase : Any = jnp.expand_dims(__UpperCamelCase , 1 ) * jnp.expand_dims(__UpperCamelCase , 0 ) # scale embeddings __lowercase : Optional[int] = scale * emb if flip_sin_to_cos: __lowercase : Any = jnp.concatenate([jnp.cos(__UpperCamelCase ), jnp.sin(__UpperCamelCase )] , axis=1 ) else: __lowercase : List[str] = jnp.concatenate([jnp.sin(__UpperCamelCase ), jnp.cos(__UpperCamelCase )] , axis=1 ) __lowercase : int = jnp.reshape(__UpperCamelCase , [jnp.shape(__UpperCamelCase )[0], embedding_dim] ) return signal class UpperCAmelCase_ ( nn.Module ): UpperCamelCase =32 UpperCamelCase =jnp.floataa @nn.compact def __call__( self , UpperCamelCase_ ) -> Optional[int]: __lowercase : Union[str, Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(UpperCamelCase_ ) __lowercase : str = nn.silu(UpperCamelCase_ ) __lowercase : Dict = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(UpperCamelCase_ ) return temb class UpperCAmelCase_ ( nn.Module ): UpperCamelCase =32 UpperCamelCase =False UpperCamelCase =1 @nn.compact def __call__( self , UpperCamelCase_ ) -> Optional[int]: return get_sinusoidal_embeddings( UpperCamelCase_ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
76
"""simple docstring""" import gc import threading import time import psutil import torch class UpperCAmelCase_ : def __init__( self ) -> str: __lowercase : List[Any] = psutil.Process() __lowercase : Any = False def _lowerCamelCase ( self ) -> Union[str, Any]: __lowercase : Optional[Any] = -1 while True: __lowercase : List[str] = max(self.process.memory_info().rss , self.cpu_memory_peak ) # can't sleep or will not catch the peak right (this comment is here on purpose) if not self.peak_monitoring: break def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : List[Any] = True __lowercase : List[Any] = threading.Thread(target=self.peak_monitor ) __lowercase : Optional[int] = True self.thread.start() def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : Union[str, Any] = False self.thread.join() return self.cpu_memory_peak a_ = PeakCPUMemory() def __UpperCAmelCase ( ): # Time __lowercase : Union[str, Any] = {'''time''': time.time()} gc.collect() torch.cuda.empty_cache() # CPU mem __lowercase : List[Any] = psutil.Process().memory_info().rss cpu_peak_tracker.start() # GPU mem for i in range(torch.cuda.device_count() ): __lowercase : List[str] = torch.cuda.memory_allocated(__UpperCamelCase ) torch.cuda.reset_peak_memory_stats() return measures def __UpperCAmelCase ( __UpperCamelCase ): # Time __lowercase : List[Any] = {'''time''': time.time() - start_measures['''time''']} gc.collect() torch.cuda.empty_cache() # CPU mem __lowercase : Union[str, Any] = (psutil.Process().memory_info().rss - start_measures['''cpu''']) / 2**20 __lowercase : Dict = (cpu_peak_tracker.stop() - start_measures['''cpu''']) / 2**20 # GPU mem for i in range(torch.cuda.device_count() ): __lowercase : str = (torch.cuda.memory_allocated(__UpperCamelCase ) - start_measures[str(__UpperCamelCase )]) / 2**20 __lowercase : Optional[int] = (torch.cuda.max_memory_allocated(__UpperCamelCase ) - start_measures[str(__UpperCamelCase )]) / 2**20 return measures def __UpperCAmelCase ( __UpperCamelCase , 
__UpperCamelCase ): print(f"""{description}:""" ) print(f"""- Time: {measures["time"]:.2f}s""" ) for i in range(torch.cuda.device_count() ): print(f"""- GPU {i} allocated: {measures[str(__UpperCamelCase )]:.2f}MiB""" ) __lowercase : Dict = measures[f"""{i}-peak"""] print(f"""- GPU {i} peak: {peak:.2f}MiB""" ) print(f"""- CPU RAM allocated: {measures["cpu"]:.2f}MiB""" ) print(f"""- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB""" )
76
1
"""simple docstring""" import json import re from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np from ...utils import is_tf_available, is_torch_available, logging if TYPE_CHECKING: if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf from tokenizers import pre_tokenizers from ...tokenization_utils_base import BatchEncoding from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_codegen import CodeGenTokenizer a_ = logging.get_logger(__name__) a_ = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'} a_ = { 'vocab_file': { 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json', }, 'merges_file': { 'Salesforce/codegen-350M-mono': 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt', }, 'tokenizer_file': { 'Salesforce/codegen-350M-mono': ( 'https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json' ), }, } a_ = { 'Salesforce/codegen-350M-mono': 2_0_4_8, } class UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =["input_ids", "attention_mask"] UpperCamelCase =CodeGenTokenizer def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_="<|endoftext|>" , UpperCamelCase_=False , **UpperCamelCase_ , ) -> str: super().__init__( UpperCamelCase_ , UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , unk_token=UpperCamelCase_ , bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , add_prefix_space=UpperCamelCase_ , **UpperCamelCase_ , ) if kwargs.pop('''add_bos_token''' , UpperCamelCase_ ): __lowercase : Optional[int] = kwargs.pop('''name_or_path''' , '''''' ) raise ValueError( '''Currenty GPT2\'s fast tokenizer 
does NOT support adding a BOS token.''' '''Instead you should use GPT2\'s slow tokenizer class `CodeGenTokenizer` as follows: \n''' F"""`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n""" F"""`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n""" '''This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005.''' ''' so that the fast tokenizer works correctly.''' ) __lowercase : Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get('''add_prefix_space''' , UpperCamelCase_ ) != add_prefix_space: __lowercase : Optional[int] = getattr(UpperCamelCase_ , pre_tok_state.pop('''type''' ) ) __lowercase : Tuple = add_prefix_space __lowercase : Optional[Any] = pre_tok_class(**UpperCamelCase_ ) __lowercase : Optional[Any] = add_prefix_space def _lowerCamelCase ( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> BatchEncoding: __lowercase : int = kwargs.get('''is_split_into_words''' , UpperCamelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." ) return super()._batch_encode_plus(*UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> BatchEncoding: __lowercase : Union[str, Any] = kwargs.get('''is_split_into_words''' , UpperCamelCase_ ) assert self.add_prefix_space or not is_split_into_words, ( F"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """ "to use it with pretokenized inputs." 
) return super()._encode_plus(*UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> Tuple[str]: __lowercase : Dict = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ ) return tuple(UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> str: __lowercase : Dict = super().decode( token_ids=UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ , clean_up_tokenization_spaces=UpperCamelCase_ , **UpperCamelCase_ , ) if truncate_before_pattern is not None and len(UpperCamelCase_ ) > 0: __lowercase : Optional[int] = self.truncate(UpperCamelCase_ , UpperCamelCase_ ) return decoded_text def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> Union[str, Any]: def find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ): __lowercase : Optional[int] = pattern.search(UpperCamelCase_ , UpperCamelCase_ ) return m.start() if m else -1 __lowercase : Tuple = [re.compile(UpperCamelCase_ , re.MULTILINE ) for pattern in truncate_before_pattern] __lowercase : Optional[int] = list(re.finditer('''^print''' , UpperCamelCase_ , re.MULTILINE ) ) if len(UpperCamelCase_ ) > 1: __lowercase : Optional[int] = completion[: prints[1].start()] __lowercase : Any = list(re.finditer('''^def''' , UpperCamelCase_ , re.MULTILINE ) ) if len(UpperCamelCase_ ) > 1: __lowercase : int = completion[: defs[1].start()] __lowercase : Any = 0 __lowercase : List[Any] = [ pos for pos in [find_re(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) for terminal in terminals] if pos != -1 ] if len(UpperCamelCase_ ) > 0: return completion[: min(UpperCamelCase_ )] else: return completion
76
"""simple docstring""" import numpy as np import datasets a_ = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n' a_ = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n' a_ = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): def _lowerCamelCase ( self ) -> List[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ), } ) , ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> Tuple: # convert to numpy arrays __lowercase : Dict = np.array(UpperCamelCase_ ) __lowercase : str = np.array(UpperCamelCase_ ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError('''Expected `X` to be 
a 2D vector''' ) if len(reference_distribution.shape ) != 2: raise ValueError('''Expected `reference_distribution` to be a 2D vector''' ) if reference_distribution.shape[0] < 2: raise ValueError( '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' ) # Get mahalanobis distance for each prediction __lowercase : Tuple = X - np.mean(UpperCamelCase_ ) __lowercase : List[Any] = np.cov(reference_distribution.T ) try: __lowercase : Tuple = np.linalg.inv(UpperCamelCase_ ) except np.linalg.LinAlgError: __lowercase : str = np.linalg.pinv(UpperCamelCase_ ) __lowercase : Any = np.dot(UpperCamelCase_ , UpperCamelCase_ ) __lowercase : Optional[Any] = np.dot(UpperCamelCase_ , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
76
1
"""simple docstring""" import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', } a_ = { 'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'}, 'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'}, } a_ = { 'ctrl': 2_5_6, } a_ = { 'Pregnancy': 1_6_8_6_2_9, 'Christianity': 7_6_7_5, 'Explain': 1_0_6_4_2_3, 'Fitness': 6_3_4_4_0, 'Saving': 6_3_1_6_3, 'Ask': 2_7_1_7_1, 'Ass': 9_5_9_8_5, 'Joke': 1_6_3_5_0_9, 'Questions': 4_5_6_2_2, 'Thoughts': 4_9_6_0_5, 'Retail': 5_2_3_4_2, 'Feminism': 1_6_4_3_3_8, 'Writing': 1_1_9_9_2, 'Atheism': 1_9_2_2_6_3, 'Netflix': 4_8_6_1_6, 'Computing': 3_9_6_3_9, 'Opinion': 4_3_2_1_3, 'Alone': 4_4_9_6_7, 'Funny': 5_8_9_1_7, 'Gaming': 4_0_3_5_8, 'Human': 4_0_8_8, 'India': 1_3_3_1, 'Joker': 7_7_1_3_8, 'Diet': 3_6_2_0_6, 'Legal': 1_1_8_5_9, 'Norman': 4_9_3_9, 'Tip': 7_2_6_8_9, 'Weight': 5_2_3_4_3, 'Movies': 4_6_2_7_3, 'Running': 2_3_4_2_5, 'Science': 2_0_9_0, 'Horror': 3_7_7_9_3, 'Confession': 6_0_5_7_2, 'Finance': 1_2_2_5_0, 'Politics': 1_6_3_6_0, 'Scary': 1_9_1_9_8_5, 'Support': 1_2_6_5_4, 'Technologies': 3_2_5_1_6, 'Teenage': 6_6_1_6_0, 'Event': 3_2_7_6_9, 'Learned': 6_7_4_6_0, 'Notion': 1_8_2_7_7_0, 'Wikipedia': 3_7_5_8_3, 'Books': 6_6_6_5, 'Extract': 7_6_0_5_0, 'Confessions': 1_0_2_7_0_1, 'Conspiracy': 7_5_9_3_2, 'Links': 6_3_6_7_4, 'Narcissus': 1_5_0_4_2_5, 'Relationship': 5_4_7_6_6, 'Relationships': 1_3_4_7_9_6, 'Reviews': 4_1_6_7_1, 'News': 4_2_5_6, 'Translation': 2_6_8_2_0, 'multilingual': 1_2_8_4_0_6, } def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Any = set() __lowercase : Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowercase : Any = char __lowercase : List[Any] = set(__UpperCamelCase ) return pairs class 
UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =CONTROL_CODES def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="<unk>" , **UpperCamelCase_ ) -> int: super().__init__(unk_token=UpperCamelCase_ , **UpperCamelCase_ ) with open(UpperCamelCase_ , encoding='''utf-8''' ) as vocab_handle: __lowercase : List[Any] = json.load(UpperCamelCase_ ) __lowercase : Any = {v: k for k, v in self.encoder.items()} with open(UpperCamelCase_ , encoding='''utf-8''' ) as merges_handle: __lowercase : Optional[Any] = merges_handle.read().split('''\n''' )[1:-1] __lowercase : Optional[Any] = [tuple(merge.split() ) for merge in merges] __lowercase : Optional[int] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) __lowercase : Optional[Any] = {} @property def _lowerCamelCase ( self ) -> Union[str, Any]: return len(self.encoder ) def _lowerCamelCase ( self ) -> Tuple: return dict(self.encoder , **self.added_tokens_encoder ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> str: if token in self.cache: return self.cache[token] __lowercase : str = tuple(UpperCamelCase_ ) __lowercase : str = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) __lowercase : Optional[Any] = get_pairs(UpperCamelCase_ ) if not pairs: return token while True: __lowercase : Dict = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __lowercase ,__lowercase : Tuple = bigram __lowercase : int = [] __lowercase : Union[str, Any] = 0 while i < len(UpperCamelCase_ ): try: __lowercase : Optional[int] = word.index(UpperCamelCase_ , UpperCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __lowercase : Tuple = j if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i 
+= 2 else: new_word.append(word[i] ) i += 1 __lowercase : List[str] = tuple(UpperCamelCase_ ) __lowercase : str = new_word if len(UpperCamelCase_ ) == 1: break else: __lowercase : List[str] = get_pairs(UpperCamelCase_ ) __lowercase : Optional[Any] = '''@@ '''.join(UpperCamelCase_ ) __lowercase : Dict = word[:-4] __lowercase : str = word return word def _lowerCamelCase ( self , UpperCamelCase_ ) -> str: __lowercase : List[Any] = [] __lowercase : int = re.findall(R'''\S+\n?''' , UpperCamelCase_ ) for token in words: split_tokens.extend(list(self.bpe(UpperCamelCase_ ).split(''' ''' ) ) ) return split_tokens def _lowerCamelCase ( self , UpperCamelCase_ ) -> Optional[Any]: return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> int: return self.decoder.get(UpperCamelCase_ , self.unk_token ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> Optional[int]: __lowercase : Tuple = ''' '''.join(UpperCamelCase_ ).replace('''@@ ''' , '''''' ).strip() return out_string def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> Tuple[str]: if not os.path.isdir(UpperCamelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowercase : Optional[Any] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __lowercase : Optional[int] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + '''\n''' ) __lowercase : List[str] = 0 with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ): 
if index != token_index: logger.warning( F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''' ) __lowercase : Union[str, Any] = token_index writer.write(''' '''.join(UpperCamelCase_ ) + '''\n''' ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
76
"""simple docstring""" a_ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def __UpperCAmelCase ( __UpperCamelCase ): # Make sure the supplied data is a bytes-like object if not isinstance(__UpperCamelCase , __UpperCamelCase ): __lowercase : str = f"""a bytes-like object is required, not '{data.__class__.__name__}'""" raise TypeError(__UpperCamelCase ) __lowercase : Any = ''''''.join(bin(__UpperCamelCase )[2:].zfill(8 ) for byte in data ) __lowercase : List[str] = len(__UpperCamelCase ) % 6 != 0 if padding_needed: # The padding that will be added later __lowercase : int = B'''=''' * ((6 - len(__UpperCamelCase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(__UpperCamelCase ) % 6) else: __lowercase : Any = B'''''' # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(__UpperCamelCase ) , 6 ) ).encode() + padding ) def __UpperCAmelCase ( __UpperCamelCase ): # Make sure encoded_data is either a string or a bytes-like object if not isinstance(__UpperCamelCase , __UpperCamelCase ) and not isinstance(__UpperCamelCase , __UpperCamelCase ): __lowercase : List[str] = ( '''argument should be a bytes-like object or ASCII string, ''' f"""not '{encoded_data.__class__.__name__}'""" ) raise TypeError(__UpperCamelCase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(__UpperCamelCase , __UpperCamelCase ): try: __lowercase : List[str] = encoded_data.decode('''utf-8''' ) except UnicodeDecodeError: raise ValueError('''base64 encoded data should only contain ASCII characters''' ) __lowercase : Dict = encoded_data.count('''=''' ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in 
encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(__UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one __lowercase : Tuple = encoded_data[:-padding] __lowercase : str = ''''''.join( bin(B64_CHARSET.index(__UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: __lowercase : Any = ''''''.join( bin(B64_CHARSET.index(__UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data ) __lowercase : int = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(__UpperCamelCase ) , 8 ) ] return bytes(__UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
76
1
"""simple docstring"""
# Convert a fairseq Hubert checkpoint into the Hugging Face Transformers format.
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    WavaVecaCTCTokenizer,
    WavaVecaFeatureExtractor,
    WavaVecaProcessor,
    logging,
)

logging.set_verbosity_info()
# FIX: the module logger and the name mapping were both assigned to ``a_``
# while every use site referenced ``logger`` / ``MAPPING`` (NameError).
logger = logging.get_logger(__name__)

# fairseq parameter-name fragment -> transformers parameter path.
# '*' is a placeholder for the encoder layer index, filled in at load time.
MAPPING = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
}


# FIX: all four functions below were defined under the same obfuscated name
# ``__UpperCAmelCase`` with five identical parameter names each (a
# SyntaxError), while the call sites used the real names restored here.
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Assign ``value`` to the attribute path ``key`` of ``hf_pointer``.

    ``weight_type`` selects which sub-tensor ("weight", "weight_g",
    "weight_v", "bias") receives the data; ``full_name`` is the fairseq
    parameter name, used only for logging / error messages.
    """
    for attribute in key.split('''.'''):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")


def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    """Copy every tensor of ``fairseq_model`` into ``hf_model`` via MAPPING.

    Convolutional feature-extractor weights are delegated to
    ``load_conv_layer``; anything unmatched is collected and logged.
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    # Fine-tuned (CTC) models wrap the base model under ``.hubert``.
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == '''group''',
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
                if key in name or (key.split('''w2v_model.''')[-1] == name.split('''.''')[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        # The layer index sits just before the matched key
                        # fragment in the fairseq name.
                        layer_index = name.split(key)[0].split('''.''')[-2]
                        mapped_key = mapped_key.replace('''*''', layer_index)
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "weight" in name:
                        weight_type = '''weight'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load one convolutional feature-extractor tensor.

    fairseq names look like ``conv_layers.<layer>.<type>.<param>`` where
    type_id 0 is the conv itself and type_id 2 the layer norm (only present
    on every layer without group norm, or on layer 0 with group norm).
    """
    name = full_name.split('''conv_layers.''')[-1]
    items = name.split('''.''')
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_hubert_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True):
    """Convert a fairseq Hubert checkpoint and save it under
    ``pytorch_dump_folder_path`` (model, and for fine-tuned checkpoints also
    the tokenizer/processor built from the fairseq dictionary)."""
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, '''vocab.json''')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, '''w''', encoding='''utf-8''') as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token='''|''',
                do_lower_case=False,
            )
            # Layer-normed feature extractors were trained with attention masks.
            return_attention_mask = True if config.feat_extract_norm == '''layer''' else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wavavec = HubertForCTC(config)
    else:
        hf_wavavec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''')[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    args = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
76
"""simple docstring"""
# Byte-pair-encoding tokenizer (CTRL-style) with control-code constants.
import json
import os
from typing import Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging

# FIX: the module constants below were all assigned to ``a_`` while the class
# body and methods referenced them by these names (NameError); restore the
# names the code actually uses.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    'vocab_file': 'vocab.json',
    'merges_file': 'merges.txt',
}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'},
    'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'ctrl': 256,
}

# Named control codes -> vocabulary ids used to condition generation.
CONTROL_CODES = {
    'Pregnancy': 168629,
    'Christianity': 7675,
    'Explain': 106423,
    'Fitness': 63440,
    'Saving': 63163,
    'Ask': 27171,
    'Ass': 95985,
    'Joke': 163509,
    'Questions': 45622,
    'Thoughts': 49605,
    'Retail': 52342,
    'Feminism': 164338,
    'Writing': 11992,
    'Atheism': 192263,
    'Netflix': 48616,
    'Computing': 39639,
    'Opinion': 43213,
    'Alone': 44967,
    'Funny': 58917,
    'Gaming': 40358,
    'Human': 4088,
    'India': 1331,
    'Joker': 77138,
    'Diet': 36206,
    'Legal': 11859,
    'Norman': 4939,
    'Tip': 72689,
    'Weight': 52343,
    'Movies': 46273,
    'Running': 23425,
    'Science': 2090,
    'Horror': 37793,
    'Confession': 60572,
    'Finance': 12250,
    'Politics': 16360,
    'Scary': 191985,
    'Support': 12654,
    'Technologies': 32516,
    'Teenage': 66160,
    'Event': 32769,
    'Learned': 67460,
    'Notion': 182770,
    'Wikipedia': 37583,
    'Books': 6665,
    'Extract': 76050,
    'Confessions': 102701,
    'Conspiracy': 75932,
    'Links': 63674,
    'Narcissus': 150425,
    'Relationship': 54766,
    'Relationships': 134796,
    'Reviews': 41671,
    'News': 4256,
    'Translation': 26820,
    'multilingual': 128406,
}


# FIX: was ``def __UpperCAmelCase`` while the class methods call
# ``get_pairs(...)``; restore the called name.
def get_pairs(word):
    """Return the set of adjacent symbol pairs in *word* (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class UpperCAmelCase_(PreTrainedTokenizer):
    """BPE tokenizer.

    FIX: the base class was the undefined name ``snake_case`` — the imported
    ``PreTrainedTokenizer`` is the only plausible base; all methods were also
    defined under the single name ``_lowerCamelCase`` (shadowing each other)
    while the code calls ``self.bpe`` and the base class dispatches to
    ``_tokenize``/``save_vocabulary``/... — the conventional names are
    restored below.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        """Load the JSON vocabulary and the BPE merge table from disk."""
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding='''utf-8''') as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding='''utf-8''') as merges_handle:
            # First line is a version header, last line is empty.
            merges = merges_handle.read().split('''\n''')[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        # token -> already-computed BPE string.
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply byte-pair merges to *token*; result is '@@ '-joined subwords."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        # Mark the word boundary on the last symbol.
        word = tuple(list(word[:-1]) + [word[-1] + '''</w>'''])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (earliest learned) pair first.
            bigram = min(pairs, key=lambda kv: self.bpe_ranks.get(kv, float('''inf''')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            word = tuple(new_word)
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = '''@@ '''.join(word)
        # Drop the trailing '</w>' boundary marker.
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize on whitespace, then BPE-split each word."""
        split_tokens = []
        words = re.findall(R'''\S+\n?''', text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(''' ''')))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Map a token string to its id, falling back to the unk token's id."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Map an id back to its token string, falling back to the unk token."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Join tokens and undo the '@@ ' subword markers."""
        out_string = ''' '''.join(tokens).replace('''@@ ''', '''''').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into *save_directory*.

        Returns the two file paths, or None (after logging) when
        *save_directory* is not a directory.
        """
        if not os.path.isdir(save_directory):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file''']
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file''']
        )

        with open(vocab_file, '''w''', encoding='''utf-8''') as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + '''\n''')

        index = 0
        with open(merge_file, '''w''', encoding='''utf-8''') as writer:
            writer.write('''#version: 0.2\n''')
            # FIX: the sort key was ``lambda UpperCamelCase_: kv[1]`` — the
            # parameter and the body disagreed (NameError on first call).
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
                        ''' Please check that the tokenizer is not corrupted!'''
                    )
                    index = token_index
                writer.write(''' '''.join(bpe_tokens) + '''\n''')
                index += 1

        return vocab_file, merge_file
76
1
"""simple docstring"""
# Processor pairing an image processor with a tokenizer, including the
# token-sequence -> JSON parser used for document-understanding outputs.
import re
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class UpperCAmelCase_(ProcessorMixin):
    """Wraps an image processor and a tokenizer into a single processor.

    FIX: the base class was the undefined name ``snake_case`` — the imported
    ``ProcessorMixin`` is the only plausible base. Every method also had
    duplicate ``UpperCamelCase_`` parameter names (a SyntaxError) and all were
    defined as ``_lowerCamelCase`` while the code calls ``self.tokenajson``;
    the names the code itself uses are restored below.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('''feature_extractor''')

        # Accept the deprecated keyword as a fallback for the new one.
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        """Run the image processor on `images` and/or the tokenizer on `text`.

        When both are given, the tokenized ids are attached to the image
        inputs as ``labels``.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop('''images''', None)
        text = kwargs.pop('''text''', None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError('''You need to specify either an `images` or `text` input to process.''')

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings['''input_ids''']
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``batch_decode``."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's ``decode``."""
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """Deprecated: temporarily route ``__call__`` to the tokenizer."""
        warnings.warn(
            '''`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '''
            '''labels by using the argument `text` of the regular `__call__` method (either in the same call as '''
            '''your images inputs, or in a separate call.'''
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def tokenajson(self, tokens, is_inner_value=False, added_vocab=None):
        """Parse a ``<s_key>...</s_key>`` token sequence into a dict/list.

        ``<sep/>`` separates sibling values; special leaf tokens of the form
        ``<name/>`` present in *added_vocab* are unwrapped to ``name``.
        Returns ``{"text_sequence": tokens}`` when nothing parses (top level).
        """
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(R'''<s_(.*?)>''', tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(RF"""</s_{key}>""", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                # Unbalanced opener: drop it and keep scanning.
                tokens = tokens.replace(start_token, '''''')
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(F"""{start_token_escaped}(.*?){end_token_escaped}""", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:
                        # non-leaf node
                        value = self.tokenajson(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:
                        # leaf nodes
                        output[key] = []
                        for leaf in content.split(R'''<sep/>'''):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token):].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        """Deprecated alias for ``image_processor_class``."""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''',
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        """Deprecated alias for ``image_processor``."""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''',
            FutureWarning,
        )
        return self.image_processor
76
"""simple docstring""" import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor a_ = logging.get_logger(__name__) class UpperCAmelCase_ ( snake_case ): def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> None: warnings.warn( '''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use LayoutLMv2ImageProcessor instead.''' , UpperCamelCase_ , ) super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
76
1
"""simple docstring""" import math import unittest from transformers import BioGptConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification, BioGptModel, BioGptTokenizer, ) from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST class UpperCAmelCase_ : def __init__( self , UpperCamelCase_ , UpperCamelCase_=13 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=True , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=5 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=16 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=3 , UpperCamelCase_=4 , UpperCamelCase_=None , ) -> List[Any]: __lowercase : str = parent __lowercase : List[Any] = batch_size __lowercase : Optional[int] = seq_length __lowercase : Dict = is_training __lowercase : Optional[int] = use_input_mask __lowercase : List[Any] = use_token_type_ids __lowercase : List[Any] = use_labels __lowercase : List[Any] = vocab_size __lowercase : Optional[int] = hidden_size __lowercase : Dict = num_hidden_layers __lowercase : int = num_attention_heads __lowercase : Optional[Any] = intermediate_size __lowercase : Any = hidden_act __lowercase : Dict = hidden_dropout_prob __lowercase : str = attention_probs_dropout_prob __lowercase : Optional[int] = max_position_embeddings __lowercase : Optional[Any] = type_vocab_size __lowercase : Any = type_sequence_label_size __lowercase : Any 
= initializer_range __lowercase : Dict = num_labels __lowercase : List[str] = num_choices __lowercase : str = scope def _lowerCamelCase ( self ) -> List[str]: __lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase : str = None if self.use_input_mask: __lowercase : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) __lowercase : str = None if self.use_token_type_ids: __lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __lowercase : List[str] = None __lowercase : Tuple = None __lowercase : Tuple = None if self.use_labels: __lowercase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __lowercase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __lowercase : str = ids_tensor([self.batch_size] , self.num_choices ) __lowercase : Optional[int] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowerCamelCase ( self ) -> List[Any]: return BioGptConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase_ , initializer_range=self.initializer_range , ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Union[str, Any]: __lowercase : Dict = BioGptModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowercase : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ ) __lowercase : Optional[int] 
= model(UpperCamelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Union[str, Any]: __lowercase : Union[str, Any] = BioGptForCausalLM(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() __lowercase : Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , labels=UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , *UpperCamelCase_ ) -> Tuple: __lowercase : Any = BioGptModel(config=UpperCamelCase_ ) model.to(UpperCamelCase_ ) model.eval() # create attention mask __lowercase : str = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCamelCase_ ) __lowercase : Dict = self.seq_length // 2 __lowercase : str = 0 # first forward pass __lowercase ,__lowercase : str = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ ).to_tuple() # create hypothetical next token and extent to next_input_ids __lowercase : Any = ids_tensor((self.batch_size, 1) , config.vocab_size ) # change a random masked slice from input_ids __lowercase : int = ids_tensor((1,) , UpperCamelCase_ ).item() + 1 __lowercase : Optional[Any] = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 ) __lowercase : str = random_other_next_tokens # append to next input_ids and attn_mask __lowercase : Dict = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowercase : List[Any] = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=UpperCamelCase_ )] , dim=1 , ) # get two different outputs __lowercase : Tuple = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ 
)['''last_hidden_state'''] __lowercase : str = model(UpperCamelCase_ , past_key_values=UpperCamelCase_ , attention_mask=UpperCamelCase_ )['''last_hidden_state'''] # select random slice __lowercase : int = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowercase : List[str] = output_from_no_past[:, -1, random_slice_idx].detach() __lowercase : str = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , *UpperCamelCase_ ) -> Dict: __lowercase : str = BioGptModel(config=UpperCamelCase_ ).to(UpperCamelCase_ ).eval() __lowercase : List[Any] = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCamelCase_ ) # first forward pass __lowercase : int = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , use_cache=UpperCamelCase_ ) __lowercase ,__lowercase : List[str] = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids __lowercase : int = ids_tensor((self.batch_size, 3) , config.vocab_size ) __lowercase : str = ids_tensor((self.batch_size, 3) , 2 ) # append to next input_ids and __lowercase : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 ) __lowercase : List[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 ) __lowercase : Optional[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ )['''last_hidden_state'''] __lowercase : List[Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , past_key_values=UpperCamelCase_ )[ '''last_hidden_state''' ] # select random slice __lowercase : Optional[int] = ids_tensor((1,) , output_from_past.shape[-1] ).item() __lowercase : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx].detach() __lowercase : Union[str, Any] = output_from_past[:, :, random_slice_idx].detach() 
        # NOTE(review): this excerpt starts mid-method -- the enclosing BioGptModelTester
        # class header sits above this view.  Identifiers (``_lowerCamelCase``,
        # ``UpperCamelCase_``, ``__lowercase``) and the duplicated parameter names are
        # machine-mangling artifacts; duplicated parameter names are not valid Python,
        # so this module cannot import as-is -- TODO restore the original names.
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCamelCase_ , UpperCamelCase_ , atol=1E-3 ) )

    # Checks loss/logit shapes of BioGptForCausalLM and that the loss backpropagates
    # (optionally with gradient checkpointing enabled).
    def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , *UpperCamelCase_ , UpperCamelCase_=False ) -> Union[str, Any]:
        __lowercase : Any = BioGptForCausalLM(UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        __lowercase : Dict = model(UpperCamelCase_ , labels=UpperCamelCase_ )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        result.loss.backward()

    # Verifies the scaled initialization of the "c_proj" projection weights
    # (std ~ initializer_range / sqrt(2 * n_layers), mean ~ 0).
    def _lowerCamelCase ( self , UpperCamelCase_ , *UpperCamelCase_ ) -> List[str]:
        __lowercase : List[str] = BioGptModel(UpperCamelCase_ )
        __lowercase : List[str] = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.0_0_1 )
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.0_1 )

    # Shape check for BioGptForTokenClassification logits.
    def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , *UpperCamelCase_ ) -> Any:
        __lowercase : Optional[Any] = self.num_labels
        __lowercase : List[str] = BioGptForTokenClassification(UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        __lowercase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    # Packs prepare_config_and_inputs() into the (config, inputs_dict) pair used by
    # the common ModelTesterMixin machinery.
    # NOTE(review): the annotated tuple-unpacking below (``(...) : Optional[Any] = ...``)
    # is another mangling artifact and is not valid Python syntax -- verify upstream.
    def _lowerCamelCase ( self ) -> Dict:
        __lowercase : Tuple = self.prepare_config_and_inputs()
        (
            (
                __lowercase
            ) ,(
                __lowercase
            ) ,(
                __lowercase
            ) ,(
                __lowercase
            ) ,(
                __lowercase
            ) ,(
                __lowercase
            ) ,(
                __lowercase
            ) ,
        ) : Optional[Any] = config_and_inputs
        __lowercase : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict


@require_torch
class UpperCAmelCase_ ( snake_case , snake_case , snake_case , unittest.TestCase ):
    # Model classes exercised by the common test mixins.
    UpperCamelCase =(
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    UpperCamelCase =(BioGptForCausalLM,) if is_torch_available() else ()
    # Pipeline-task -> model-class mapping for the pipeline test mixin.
    UpperCamelCase =(
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    UpperCamelCase =False

    def _lowerCamelCase ( self ) -> List[Any]:
        __lowercase : int = BioGptModelTester(self )
        __lowercase : Any = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )

    def _lowerCamelCase ( self ) -> Tuple:
        self.config_tester.run_common_tests()

    def _lowerCamelCase ( self ) -> Union[str, Any]:
        __lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase_ )

    # Runs the model check once per supported position-embedding type.
    def _lowerCamelCase ( self ) -> Dict:
        __lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __lowercase : List[Any] = type
            self.model_tester.create_and_check_model(*UpperCamelCase_ )

    def _lowerCamelCase ( self ) -> Optional[int]:
        __lowercase : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*UpperCamelCase_ )

    def _lowerCamelCase ( self ) -> str:
        __lowercase : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*UpperCamelCase_ , gradient_checkpointing=UpperCamelCase_ )

    def _lowerCamelCase ( self ) -> Optional[Any]:
        __lowercase : List[Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*UpperCamelCase_ )

    def _lowerCamelCase ( self ) -> Dict:
        __lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*UpperCamelCase_ )

    def _lowerCamelCase ( self ) -> List[str]:
        __lowercase : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*UpperCamelCase_ )

    # Batched-generation smoke test: left padding must produce the same text as
    # generating each sentence individually.
    @slow
    def _lowerCamelCase ( self ) -> str:
        __lowercase : Tuple = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
        model.to(UpperCamelCase_ )
        __lowercase : List[Any] = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )

        __lowercase : int = '''left'''

        # Define PAD Token = EOS Token = 50256
        __lowercase : Tuple = tokenizer.eos_token
        __lowercase : Optional[Any] = model.config.eos_token_id

        # use different length sentences to test batching
        __lowercase : Optional[int] = [
            '''Hello, my dog is a little''',
            '''Today, I''',
        ]

        __lowercase : Tuple = tokenizer(UpperCamelCase_ , return_tensors='''pt''' , padding=UpperCamelCase_ )
        __lowercase : Dict = inputs['''input_ids'''].to(UpperCamelCase_ )

        __lowercase : str = model.generate(
            input_ids=UpperCamelCase_ , attention_mask=inputs['''attention_mask'''].to(UpperCamelCase_ ) , )

        __lowercase : Union[str, Any] = tokenizer(sentences[0] , return_tensors='''pt''' ).input_ids.to(UpperCamelCase_ )
        __lowercase : Dict = model.generate(input_ids=UpperCamelCase_ )

        # Account for padding when bounding the max_length of the second sentence.
        __lowercase : int = inputs_non_padded.shape[-1] - inputs['''attention_mask'''][-1].long().sum().cpu().item()
        __lowercase : List[Any] = tokenizer(sentences[1] , return_tensors='''pt''' ).input_ids.to(UpperCamelCase_ )
        __lowercase : Any = model.generate(input_ids=UpperCamelCase_ , max_length=model.config.max_length - num_paddings )

        __lowercase : Union[str, Any] = tokenizer.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )
        __lowercase : Union[str, Any] = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCamelCase_ )
        __lowercase : Union[str, Any] = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCamelCase_ )

        __lowercase : Dict = [
            '''Hello, my dog is a little bit bigger than a little bit.''',
            '''Today, I have a good idea of how to use the information''',
        ]
        self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
        self.assertListEqual(UpperCamelCase_ , [non_padded_sentence, padded_sentence] )

    @slow
    def _lowerCamelCase ( self ) -> int:
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __lowercase : List[Any] = BioGptModel.from_pretrained(UpperCamelCase_ )
            self.assertIsNotNone(UpperCamelCase_ )

    # Single-label sequence classification: logits must be (batch, num_labels).
    def _lowerCamelCase ( self ) -> Optional[Any]:
        __lowercase ,__lowercase : str = self.model_tester.prepare_config_and_inputs_for_common()
        __lowercase : Optional[int] = 3
        __lowercase : Optional[Any] = input_dict['''input_ids''']
        __lowercase : str = input_ids.ne(1 ).to(UpperCamelCase_ )
        __lowercase : Dict = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        __lowercase : Optional[Any] = BioGptForSequenceClassification(UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        __lowercase : Union[str, Any] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    # Multi-label sequence classification variant of the test above.
    def _lowerCamelCase ( self ) -> str:
        __lowercase ,__lowercase : Any = self.model_tester.prepare_config_and_inputs_for_common()
        __lowercase : str = 3
        __lowercase : Optional[int] = '''multi_label_classification'''
        __lowercase : List[str] = input_dict['''input_ids''']
        __lowercase : Dict = input_ids.ne(1 ).to(UpperCamelCase_ )
        __lowercase : Optional[int] = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        __lowercase : Optional[int] = BioGptForSequenceClassification(UpperCamelCase_ )
        model.to(UpperCamelCase_ )
        model.eval()
        __lowercase : List[str] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , labels=UpperCamelCase_ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )


@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
    # Integration tests against the released microsoft/biogpt checkpoint.
    @slow
    def _lowerCamelCase ( self ) -> Union[str, Any]:
        __lowercase : int = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
        __lowercase : List[Any] = torch.tensor([[2, 48_05, 9, 6_56, 21]] )
        __lowercase : int = model(UpperCamelCase_ )[0]

        __lowercase : Dict = 4_23_84
        __lowercase : Tuple = torch.Size((1, 5, vocab_size) )
        self.assertEqual(output.shape , UpperCamelCase_ )

        # Golden logits for the first 3x3 slice of the output.
        __lowercase : Union[str, Any] = torch.tensor(
            [[[-9.5_2_3_6, -9.8_9_1_8, 1_0.4_5_5_7], [-1_1.0_4_6_9, -9.6_4_2_3, 8.1_0_2_2], [-8.8_6_6_4, -7.8_8_2_6, 5.5_3_2_5]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase_ , atol=1E-4 ) )

    # Deterministic beam-search generation against a golden transcript.
    @slow
    def _lowerCamelCase ( self ) -> List[str]:
        __lowercase : int = BioGptTokenizer.from_pretrained('''microsoft/biogpt''' )
        __lowercase : List[str] = BioGptForCausalLM.from_pretrained('''microsoft/biogpt''' )
        model.to(UpperCamelCase_ )
        torch.manual_seed(0 )
        __lowercase : int = tokenizer('''COVID-19 is''' , return_tensors='''pt''' ).to(UpperCamelCase_ )
        __lowercase : Dict = model.generate(
            **UpperCamelCase_ , min_length=1_00 , max_length=10_24 , num_beams=5 , early_stopping=UpperCamelCase_ , )
        __lowercase : str = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCamelCase_ )

        __lowercase : Optional[int] = (
            '''COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the'''
            ''' causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and'''
            ''' territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),'''
            ''' and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and'''
            ''' more than 800,000 deaths.'''
        )
        self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
76
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging a_ = logging.get_logger(__name__) a_ = '▁' a_ = {'vocab_file': 'sentencepiece.bpe.model'} a_ = { 'vocab_file': { 'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model', 'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model', 'xlm-roberta-large-finetuned-conll02-dutch': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll02-spanish': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll03-english': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll03-german': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model' ), } } a_ = { 'xlm-roberta-base': 5_1_2, 'xlm-roberta-large': 5_1_2, 'xlm-roberta-large-finetuned-conll02-dutch': 5_1_2, 'xlm-roberta-large-finetuned-conll02-spanish': 5_1_2, 'xlm-roberta-large-finetuned-conll03-english': 5_1_2, 'xlm-roberta-large-finetuned-conll03-german': 5_1_2, } class UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =["input_ids", "attention_mask"] def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> None: # Mask token behave like a normal word, i.e. 
include the space before it __lowercase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token __lowercase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) __lowercase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCamelCase_ ) ) __lowercase : str = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __lowercase : List[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __lowercase : Tuple = 1 __lowercase : Any = len(self.sp_model ) + self.fairseq_offset __lowercase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Optional[Any]: __lowercase : int = self.__dict__.copy() __lowercase : int = None __lowercase : Optional[Any] = self.sp_model.serialized_model_proto() return state def __setstate__( self , UpperCamelCase_ ) -> Tuple: __lowercase : List[str] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __lowercase : str = {} __lowercase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowercase : Dict = [self.cls_token_id] __lowercase : Union[str, Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> List[int]: __lowercase : Optional[Any] = [self.sep_token_id] __lowercase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return 
len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowerCamelCase ( self ) -> Dict: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def _lowerCamelCase ( self ) -> str: __lowercase : List[str] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[str]: return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> str: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __lowercase : Optional[Any] = self.sp_model.PieceToId(UpperCamelCase_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _lowerCamelCase ( self , UpperCamelCase_ ) -> Tuple: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> Dict: __lowercase : Tuple = ''''''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ''' ''' ).strip() return out_string def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> Tuple[str]: if not os.path.isdir(UpperCamelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowercase : List[Any] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase_ , '''wb''' ) as fi: __lowercase : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_ ) return (out_vocab_file,)
76
1
"""simple docstring""" from typing import TYPE_CHECKING from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = {'configuration_mmbt': ['MMBTConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings'] if TYPE_CHECKING: from .configuration_mmbt import MMBTConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
"""simple docstring""" import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput a_ = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class UpperCAmelCase_ ( snake_case ): def __init__( self , *UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ ) -> Tuple: super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) __lowercase : Union[str, Any] = eval_examples __lowercase : Union[str, Any] = post_process_function __lowercase : Any = quant_trainer_args __lowercase : Optional[Any] = 1_28 # default number of calibration samples def _lowerCamelCase ( self , UpperCamelCase_=None ) -> Any: if calib_dataset is None and self.calib_dataset is None: raise ValueError('''Trainer: calibration requires an calib_dataset.''' ) __lowercase : Tuple = calib_dataset if calib_dataset is not None else self.calib_dataset __lowercase : str = self._remove_unused_columns(UpperCamelCase_ , description='''Calibration''' ) return DataLoader( UpperCamelCase_ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=UpperCamelCase_ , ) def _lowerCamelCase ( self , UpperCamelCase_=None ) -> Any: __lowercase : Optional[int] = self.train_dataset if calib_dataset is None else calib_dataset __lowercase : List[Any] = self.get_calib_dataloader(UpperCamelCase_ ) __lowercase : Dict = self.model quant_trainer.configure_model(UpperCamelCase_ , self.quant_trainer_args , calib=UpperCamelCase_ ) model.eval() quant_trainer.enable_calibration(UpperCamelCase_ ) logger.info('''***** Running calibration *****''' ) logger.info(F""" Num examples = 
{self.calib_num}""" ) logger.info(F""" Batch size = {calib_dataloader.batch_size}""" ) for step, inputs in enumerate(UpperCamelCase_ ): # Prediction step __lowercase ,__lowercase ,__lowercase : Optional[Any] = self.prediction_step(UpperCamelCase_ , UpperCamelCase_ , prediction_loss_only=UpperCamelCase_ ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(UpperCamelCase_ , self.quant_trainer_args ) __lowercase : Tuple = model def _lowerCamelCase ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_ = "eval" ) -> str: __lowercase : Tuple = self.eval_dataset if eval_dataset is None else eval_dataset __lowercase : Union[str, Any] = self.get_eval_dataloader(UpperCamelCase_ ) __lowercase : str = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. __lowercase : Optional[int] = self.compute_metrics __lowercase : Dict = None __lowercase : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __lowercase : Tuple = eval_loop( UpperCamelCase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , ) finally: __lowercase : List[str] = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: __lowercase : int = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , output.predictions ) __lowercase : Optional[int] = self.compute_metrics(UpperCamelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): __lowercase : List[str] = metrics.pop(UpperCamelCase_ ) self.log(UpperCamelCase_ ) else: __lowercase : Dict = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report() ) __lowercase : int = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase_ ) return metrics def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_ = "test" ) -> List[Any]: __lowercase : Optional[int] = self.get_test_dataloader(UpperCamelCase_ ) # Temporarily disable metric computation, we will do it in the loop here. __lowercase : str = self.compute_metrics __lowercase : Dict = None __lowercase : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __lowercase : Union[str, Any] = eval_loop( UpperCamelCase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , ) finally: __lowercase : Any = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output __lowercase : Dict = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , output.predictions , '''predict''' ) __lowercase : Optional[int] = self.compute_metrics(UpperCamelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): __lowercase : List[str] = metrics.pop(UpperCamelCase_ ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_="./" ) -> int: __lowercase : Optional[int] = self.eval_dataset __lowercase : Optional[int] = self.get_eval_dataloader(UpperCamelCase_ ) __lowercase : Any = next(iter(UpperCamelCase_ ) ) # saving device - to make it consistent __lowercase : Any = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) # convert to tuple __lowercase : Tuple = tuple(v.to(UpperCamelCase_ ) for k, v in batch.items() ) logger.info('''Converting model to be onnx compatible''' ) from pytorch_quantization.nn 
import TensorQuantizer __lowercase : List[Any] = True __lowercase : int = self.model.to(UpperCamelCase_ ) model.eval() model.float() __lowercase : Optional[int] = model.module if hasattr(UpperCamelCase_ , '''module''' ) else model quant_trainer.configure_model(UpperCamelCase_ , self.quant_trainer_args ) __lowercase : Tuple = os.path.join(UpperCamelCase_ , '''model.onnx''' ) logger.info(F"""exporting model to {output_model_file}""" ) __lowercase : Tuple = {0: '''batch_size''', 1: '''seq_len'''} torch.onnx.export( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , export_params=UpperCamelCase_ , opset_version=13 , do_constant_folding=UpperCamelCase_ , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={ '''input_ids''': axes, '''attention_mask''': axes, '''token_type_ids''': axes, '''output_start_logits''': axes, '''output_end_logits''': axes, } , verbose=UpperCamelCase_ , ) logger.info('''onnx export finished''' )
76
1
"""simple docstring""" from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class UpperCAmelCase_ : def __init__( self , UpperCamelCase_ , ) -> List[Any]: __lowercase : List[str] = parent __lowercase : Tuple = 13 __lowercase : int = 7 __lowercase : str = 30 __lowercase : int = self.seq_length + self.mem_len __lowercase : Optional[Any] = 15 __lowercase : Optional[int] = True __lowercase : Union[str, Any] = True __lowercase : Optional[int] = 99 __lowercase : int = [10, 50, 80] __lowercase : List[str] = 32 __lowercase : str = 32 __lowercase : Optional[Any] = 4 __lowercase : Any = 8 __lowercase : Dict = 1_28 __lowercase : Optional[int] = 2 __lowercase : Any = 2 __lowercase : Dict = None __lowercase : Optional[Any] = 1 __lowercase : Optional[int] = 0 __lowercase : Optional[Any] = 3 __lowercase : int = self.vocab_size - 1 __lowercase : List[Any] = 0.0_1 def _lowerCamelCase ( self ) -> Dict: __lowercase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase : Optional[int] = None if self.use_labels: __lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __lowercase : Tuple = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , 
d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def _lowerCamelCase ( self ) -> int: random.seed(self.seed ) tf.random.set_seed(self.seed ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Tuple: __lowercase : Dict = TFTransfoXLModel(UpperCamelCase_ ) __lowercase ,__lowercase : int = model(UpperCamelCase_ ).to_tuple() __lowercase : str = {'''input_ids''': input_ids_a, '''mems''': mems_a} __lowercase ,__lowercase : int = model(UpperCamelCase_ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]: __lowercase : List[str] = TFTransfoXLLMHeadModel(UpperCamelCase_ ) __lowercase ,__lowercase : Optional[int] = model(UpperCamelCase_ ).to_tuple() __lowercase : Dict = {'''input_ids''': input_ids_a, '''labels''': lm_labels} __lowercase ,__lowercase : int = model(UpperCamelCase_ ).to_tuple() __lowercase ,__lowercase : Optional[int] = model([input_ids_a, mems_a] ).to_tuple() __lowercase : Optional[Any] = {'''input_ids''': input_ids_a, '''mems''': mems_a, '''labels''': lm_labels} __lowercase ,__lowercase : Tuple = model(UpperCamelCase_ ).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) 
self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[Any]: __lowercase : Tuple = TFTransfoXLForSequenceClassification(UpperCamelCase_ ) __lowercase : Optional[int] = model(UpperCamelCase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowerCamelCase ( self ) -> Union[str, Any]: __lowercase : List[str] = self.prepare_config_and_inputs() ((__lowercase) ,(__lowercase) ,(__lowercase) ,(__lowercase)) : int = config_and_inputs __lowercase : Tuple = {'''input_ids''': input_ids_a} return config, inputs_dict @require_tf class UpperCAmelCase_ ( snake_case , snake_case , unittest.TestCase ): UpperCamelCase =( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) UpperCamelCase =() if is_tf_available() else () UpperCamelCase =( { "feature-extraction": TFTransfoXLModel, "text-classification": TFTransfoXLForSequenceClassification, "text-generation": TFTransfoXLLMHeadModel, "zero-shot": TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented UpperCamelCase =False UpperCamelCase =False UpperCamelCase =False UpperCamelCase =False def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Dict: if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. 
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. return True return False def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : Optional[Any] = TFTransfoXLModelTester(self ) __lowercase : List[str] = ConfigTester(self , config_class=UpperCamelCase_ , d_embed=37 ) def _lowerCamelCase ( self ) -> List[Any]: self.config_tester.run_common_tests() def _lowerCamelCase ( self ) -> List[Any]: self.model_tester.set_seed() __lowercase : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*UpperCamelCase_ ) def _lowerCamelCase ( self ) -> List[str]: self.model_tester.set_seed() __lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*UpperCamelCase_ ) def _lowerCamelCase ( self ) -> str: __lowercase : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*UpperCamelCase_ ) def _lowerCamelCase ( self ) -> List[str]: __lowercase ,__lowercase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() __lowercase : Any = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: __lowercase : List[Any] = model_class(UpperCamelCase_ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: __lowercase : int = model.get_output_embeddings() assert isinstance(UpperCamelCase_ , tf.keras.layers.Layer ) __lowercase : int = model.get_bias() assert name is None else: __lowercase : Optional[int] = model.get_output_embeddings() assert x is None __lowercase : Any = model.get_bias() assert name is None def _lowerCamelCase ( self ) -> Optional[int]: # TODO JP: Make TransfoXL XLA compliant pass @slow def _lowerCamelCase ( self ) -> Union[str, Any]: for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __lowercase : int = 
TFTransfoXLModel.from_pretrained(UpperCamelCase_ ) self.assertIsNotNone(UpperCamelCase_ ) @unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' ) def _lowerCamelCase ( self ) -> List[str]: pass @require_tf class UpperCAmelCase_ ( unittest.TestCase ): @unittest.skip('''Skip test until #12651 is resolved.''' ) @slow def _lowerCamelCase ( self ) -> List[Any]: __lowercase : int = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' ) # fmt: off __lowercase : Tuple = tf.convert_to_tensor([[33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . 
<eod> </s> <eos> # fmt: off __lowercase : Union[str, Any] = [33,12_97,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,22,17_06,17,2_00_98,5,32_15,21,37,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,62_24,8_31,1_60_02,2,8,6_03,7_89_67,2_95_46,23,8_03,20,25,4_16,5,8,2_32,4,2_77,6,18_55,46_01,3,2_95_46,54,8,36_09,5,5_72_11,49,4,1,2_77,18,8,17_55,1_56_91,3,3_41,25,4_16,6_93,4_25_73,71,17,4_01,94,31,1_79_19,2,2_95_46,78_73,18,1,4_35,23,1_10_11,7_55,5,51_67,3,79_83,98,84,2,2_95_46,32_67,8,36_09,4,1,48_65,10_75,2,60_87,71,6,3_46,8,58_54,3,2_95_46,8_24,14_00,18_68,2,19,1_60,2,3_11,8,54_96,2,2_09_20,17,25,1_50_97,3,24,24,0,33,1,18_57,2,1,10_09,4,11_09,1_17_39,47_62,3_58,5,25,2_45,28,11_10,3,13,10_41,4,24,6_03,4_90,2,7_14_77,2_00_98,10_44_47,2,2_09_61,1,26_04,4,1,3_29,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> __lowercase : List[str] = model.generate(UpperCamelCase_ , max_length=2_00 , do_sample=UpperCamelCase_ ) self.assertListEqual(output_ids[0].numpy().tolist() , UpperCamelCase_ )
76
"""simple docstring""" import math import flax.linen as nn import jax.numpy as jnp def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 , __UpperCamelCase = 1 , __UpperCamelCase = 1.0e4 , __UpperCamelCase = False , __UpperCamelCase = 1.0 , ): assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, f"""Embedding dimension {embedding_dim} should be even""" __lowercase : Dict = float(embedding_dim // 2 ) __lowercase : Tuple = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) __lowercase : List[Any] = min_timescale * jnp.exp(jnp.arange(__UpperCamelCase , dtype=jnp.floataa ) * -log_timescale_increment ) __lowercase : Any = jnp.expand_dims(__UpperCamelCase , 1 ) * jnp.expand_dims(__UpperCamelCase , 0 ) # scale embeddings __lowercase : Optional[int] = scale * emb if flip_sin_to_cos: __lowercase : Any = jnp.concatenate([jnp.cos(__UpperCamelCase ), jnp.sin(__UpperCamelCase )] , axis=1 ) else: __lowercase : List[str] = jnp.concatenate([jnp.sin(__UpperCamelCase ), jnp.cos(__UpperCamelCase )] , axis=1 ) __lowercase : int = jnp.reshape(__UpperCamelCase , [jnp.shape(__UpperCamelCase )[0], embedding_dim] ) return signal class UpperCAmelCase_ ( nn.Module ): UpperCamelCase =32 UpperCamelCase =jnp.floataa @nn.compact def __call__( self , UpperCamelCase_ ) -> Optional[int]: __lowercase : Union[str, Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(UpperCamelCase_ ) __lowercase : str = nn.silu(UpperCamelCase_ ) __lowercase : Dict = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(UpperCamelCase_ ) return temb class UpperCAmelCase_ ( nn.Module ): UpperCamelCase =32 UpperCamelCase =False UpperCamelCase =1 @nn.compact def __call__( self , UpperCamelCase_ ) -> Optional[int]: return get_sinusoidal_embeddings( UpperCamelCase_ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
76
1
"""simple docstring""" from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = "x" , __UpperCamelCase = 10**-10 , __UpperCamelCase = 1 , ): __lowercase : List[Any] = symbols(__UpperCamelCase ) __lowercase : List[Any] = lambdify(__UpperCamelCase , __UpperCamelCase ) __lowercase : Optional[int] = lambdify(__UpperCamelCase , diff(__UpperCamelCase , __UpperCamelCase ) ) __lowercase : Dict = starting_point while True: if diff_function(__UpperCamelCase ) != 0: __lowercase : List[str] = prev_guess - multiplicity * func(__UpperCamelCase ) / diff_function( __UpperCamelCase ) else: raise ZeroDivisionError('''Could not find root''' ) from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess ) < precision: return next_guess __lowercase : int = next_guess # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(F"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}") # Find root of polynomial # Find fourth Root of 5 print(F"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}") # Find value of e print( 'The root of log(y) - 1 = 0 is ', F"{newton_raphson('log(y) - 1', 2, variable='y')}", ) # Exponential Roots print( 'The root of exp(x) - 1 = 0 is', F"{newton_raphson('exp(x) - 1', 1_0, precision=0.005)}", ) # Find root of cos(x) print(F"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
76
"""simple docstring""" import os import sys a_ = os.path.join(os.path.dirname(__file__), 'src') sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) a_ = [ 'torch', 'numpy', 'tokenizers', 'filelock', 'requests', 'tqdm', 'regex', 'sentencepiece', 'sacremoses', 'importlib_metadata', 'huggingface_hub', ] @add_start_docstrings(AutoConfig.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoConfig.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoTokenizer.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoTokenizer.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoModel.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoModel.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoModelForCausalLM.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoModelForMaskedLM.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoModelForSequenceClassification.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoModelForQuestionAnswering.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
76
1
"""simple docstring""" from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
76
"""simple docstring""" from math import pi, sqrt, tan def __UpperCAmelCase ( __UpperCamelCase ): if side_length < 0: raise ValueError('''surface_area_cube() only accepts non-negative values''' ) return 6 * side_length**2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if length < 0 or breadth < 0 or height < 0: raise ValueError('''surface_area_cuboid() only accepts non-negative values''' ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def __UpperCAmelCase ( __UpperCamelCase ): if radius < 0: raise ValueError('''surface_area_sphere() only accepts non-negative values''' ) return 4 * pi * radius**2 def __UpperCAmelCase ( __UpperCamelCase ): if radius < 0: raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' ) return 3 * pi * radius**2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if radius < 0 or height < 0: raise ValueError('''surface_area_cone() only accepts non-negative values''' ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( '''surface_area_conical_frustum() only accepts non-negative values''' ) __lowercase : List[str] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if radius < 0 or height < 0: raise ValueError('''surface_area_cylinder() only accepts non-negative values''' ) return 2 * pi * radius * (height + radius) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if torus_radius < 0 or tube_radius < 0: raise ValueError('''surface_area_torus() only accepts non-negative values''' ) if torus_radius < tube_radius: raise ValueError( '''surface_area_torus() does not support spindle or self intersecting tori''' ) return 4 * pow(__UpperCamelCase , 2 ) 
* torus_radius * tube_radius def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if length < 0 or width < 0: raise ValueError('''area_rectangle() only accepts non-negative values''' ) return length * width def __UpperCAmelCase ( __UpperCamelCase ): if side_length < 0: raise ValueError('''area_square() only accepts non-negative values''' ) return side_length**2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if base < 0 or height < 0: raise ValueError('''area_triangle() only accepts non-negative values''' ) return (base * height) / 2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError('''Given three sides do not form a triangle''' ) __lowercase : int = (sidea + sidea + sidea) / 2 __lowercase : List[Any] = sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if base < 0 or height < 0: raise ValueError('''area_parallelogram() only accepts non-negative values''' ) return base * height def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if basea < 0 or basea < 0 or height < 0: raise ValueError('''area_trapezium() only accepts non-negative values''' ) return 1 / 2 * (basea + basea) * height def __UpperCAmelCase ( __UpperCamelCase ): if radius < 0: raise ValueError('''area_circle() only accepts non-negative values''' ) return pi * radius**2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if radius_x < 0 or radius_y < 0: raise ValueError('''area_ellipse() only accepts non-negative values''' ) return pi * radius_x * radius_y def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if diagonal_a < 0 or diagonal_a < 0: raise 
ValueError('''area_rhombus() only accepts non-negative values''' ) return 1 / 2 * diagonal_a * diagonal_a def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if not isinstance(__UpperCamelCase , __UpperCamelCase ) or sides < 3: raise ValueError( '''area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides''' ) elif length < 0: raise ValueError( '''area_reg_polygon() only accepts non-negative values as \ length of a side''' ) return (sides * length**2) / (4 * tan(pi / sides )) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print('[DEMO] Areas of various geometric shapes: \n') print(F"Rectangle: {area_rectangle(1_0, 2_0) = }") print(F"Square: {area_square(1_0) = }") print(F"Triangle: {area_triangle(1_0, 1_0) = }") print(F"Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }") print(F"Parallelogram: {area_parallelogram(1_0, 2_0) = }") print(F"Rhombus: {area_rhombus(1_0, 2_0) = }") print(F"Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }") print(F"Circle: {area_circle(2_0) = }") print(F"Ellipse: {area_ellipse(1_0, 2_0) = }") print('\nSurface Areas of various geometric shapes: \n') print(F"Cube: {surface_area_cube(2_0) = }") print(F"Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }") print(F"Sphere: {surface_area_sphere(2_0) = }") print(F"Hemisphere: {surface_area_hemisphere(2_0) = }") print(F"Cone: {surface_area_cone(1_0, 2_0) = }") print(F"Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }") print(F"Cylinder: {surface_area_cylinder(1_0, 2_0) = }") print(F"Torus: {surface_area_torus(2_0, 1_0) = }") print(F"Equilateral Triangle: {area_reg_polygon(3, 1_0) = }") print(F"Square: {area_reg_polygon(4, 1_0) = }") print(F"Reqular Pentagon: {area_reg_polygon(5, 1_0) = }")
76
1
"""simple docstring""" class UpperCAmelCase_ : def __init__( self ) -> None: __lowercase : dict[str, TrieNode] = {} # Mapping from char to TrieNode __lowercase : Dict = False def _lowerCamelCase ( self , UpperCamelCase_ ) -> None: for word in words: self.insert(UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> None: __lowercase : List[str] = self for char in word: if char not in curr.nodes: __lowercase : Optional[Any] = TrieNode() __lowercase : List[Any] = curr.nodes[char] __lowercase : List[str] = True def _lowerCamelCase ( self , UpperCamelCase_ ) -> bool: __lowercase : List[str] = self for char in word: if char not in curr.nodes: return False __lowercase : Tuple = curr.nodes[char] return curr.is_leaf def _lowerCamelCase ( self , UpperCamelCase_ ) -> None: def _delete(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> bool: if index == len(UpperCamelCase_ ): # If word does not exist if not curr.is_leaf: return False __lowercase : str = False return len(curr.nodes ) == 0 __lowercase : Any = word[index] __lowercase : List[str] = curr.nodes.get(UpperCamelCase_ ) # If char not in current trie node if not char_node: return False # Flag to check if node can be deleted __lowercase : Optional[int] = _delete(UpperCamelCase_ , UpperCamelCase_ , index + 1 ) if delete_curr: del curr.nodes[char] return len(curr.nodes ) == 0 return delete_curr _delete(self , UpperCamelCase_ , 0 ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if node.is_leaf: print(__UpperCamelCase , end=''' ''' ) for key, value in node.nodes.items(): print_words(__UpperCamelCase , word + key ) def __UpperCAmelCase ( ): __lowercase : Optional[int] = '''banana bananas bandana band apple all beast'''.split() __lowercase : str = TrieNode() root.insert_many(__UpperCamelCase ) # print_words(root, "") assert all(root.find(__UpperCamelCase ) for word in words ) assert root.find('''banana''' ) assert not root.find('''bandanas''' ) assert not root.find('''apps''' ) assert 
root.find('''apple''' ) assert root.find('''all''' ) root.delete('''all''' ) assert not root.find('''all''' ) root.delete('''banana''' ) assert not root.find('''banana''' ) assert root.find('''bananas''' ) return True def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): print(str(__UpperCamelCase ) , '''works!''' if passes else '''doesn\'t work :(''' ) def __UpperCAmelCase ( ): assert test_trie() def __UpperCAmelCase ( ): print_results('''Testing trie functionality''' , test_trie() ) if __name__ == "__main__": main()
76
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): # noqa: E741 while r - l > 1: __lowercase : int = (l + r) // 2 if v[m] >= key: __lowercase : Any = m else: __lowercase : List[Any] = m # noqa: E741 return r def __UpperCAmelCase ( __UpperCamelCase ): if len(__UpperCamelCase ) == 0: return 0 __lowercase : List[str] = [0] * len(__UpperCamelCase ) __lowercase : Any = 1 __lowercase : Dict = v[0] for i in range(1 , len(__UpperCamelCase ) ): if v[i] < tail[0]: __lowercase : Tuple = v[i] elif v[i] > tail[length - 1]: __lowercase : Optional[Any] = v[i] length += 1 else: __lowercase : Dict = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
76
1
"""simple docstring""" from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class UpperCAmelCase_ : def __init__( self , UpperCamelCase_ = None ) -> None: if components is None: __lowercase : Optional[int] = [] __lowercase : List[Any] = list(UpperCamelCase_ ) def __len__( self ) -> int: return len(self.__components ) def __str__( self ) -> str: return "(" + ",".join(map(UpperCamelCase_ , self.__components ) ) + ")" def __add__( self , UpperCamelCase_ ) -> Vector: __lowercase : Any = len(self ) if size == len(UpperCamelCase_ ): __lowercase : List[str] = [self.__components[i] + other.component(UpperCamelCase_ ) for i in range(UpperCamelCase_ )] return Vector(UpperCamelCase_ ) else: raise Exception('''must have the same size''' ) def __sub__( self , UpperCamelCase_ ) -> Vector: __lowercase : Dict = len(self ) if size == len(UpperCamelCase_ ): __lowercase : str = [self.__components[i] - other.component(UpperCamelCase_ ) for i in range(UpperCamelCase_ )] return Vector(UpperCamelCase_ ) else: # error case raise Exception('''must have the same size''' ) @overload def __mul__( self , UpperCamelCase_ ) -> Vector: ... @overload def __mul__( self , UpperCamelCase_ ) -> float: ... 
def __mul__( self , UpperCamelCase_ ) -> float | Vector: if isinstance(UpperCamelCase_ , (float, int) ): __lowercase : List[str] = [c * other for c in self.__components] return Vector(UpperCamelCase_ ) elif isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(self ) == len(UpperCamelCase_ ): __lowercase : Optional[Any] = len(self ) __lowercase : str = [self.__components[i] * other.component(UpperCamelCase_ ) for i in range(UpperCamelCase_ )] return sum(UpperCamelCase_ ) else: # error case raise Exception('''invalid operand!''' ) def _lowerCamelCase ( self ) -> Vector: return Vector(self.__components ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> float: if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and -len(self.__components ) <= i < len(self.__components ): return self.__components[i] else: raise Exception('''index out of range''' ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> None: assert -len(self.__components ) <= pos < len(self.__components ) __lowercase : List[str] = value def _lowerCamelCase ( self ) -> float: if len(self.__components ) == 0: raise Exception('''Vector is empty''' ) __lowercase : List[Any] = [c**2 for c in self.__components] return math.sqrt(sum(UpperCamelCase_ ) ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = False ) -> float: __lowercase : List[str] = self * other __lowercase : int = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den ) ) else: return math.acos(num / den ) def __UpperCAmelCase ( __UpperCamelCase ): assert isinstance(__UpperCamelCase , __UpperCamelCase ) return Vector([0] * dimension ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): assert isinstance(__UpperCamelCase , __UpperCamelCase ) and (isinstance(__UpperCamelCase , __UpperCamelCase )) __lowercase : List[Any] = [0] * dimension __lowercase : List[Any] = 1 return Vector(__UpperCamelCase ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , 
__UpperCamelCase ): assert ( isinstance(__UpperCamelCase , __UpperCamelCase ) and isinstance(__UpperCamelCase , __UpperCamelCase ) and (isinstance(__UpperCamelCase , (int, float) )) ) return x * scalar + y def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): random.seed(__UpperCamelCase ) __lowercase : Any = [random.randint(__UpperCamelCase , __UpperCamelCase ) for _ in range(__UpperCamelCase )] return Vector(__UpperCamelCase ) class UpperCAmelCase_ : def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> None: __lowercase : List[Any] = matrix __lowercase : Optional[int] = w __lowercase : Union[str, Any] = h def __str__( self ) -> str: __lowercase : List[Any] = '''''' for i in range(self.__height ): ans += "|" for j in range(self.__width ): if j < self.__width - 1: ans += str(self.__matrix[i][j] ) + "," else: ans += str(self.__matrix[i][j] ) + "|\n" return ans def __add__( self , UpperCamelCase_ ) -> Matrix: if self.__width == other.width() and self.__height == other.height(): __lowercase : Any = [] for i in range(self.__height ): __lowercase : Tuple = [ self.__matrix[i][j] + other.component(UpperCamelCase_ , UpperCamelCase_ ) for j in range(self.__width ) ] matrix.append(UpperCamelCase_ ) return Matrix(UpperCamelCase_ , self.__width , self.__height ) else: raise Exception('''matrix must have the same dimension!''' ) def __sub__( self , UpperCamelCase_ ) -> Matrix: if self.__width == other.width() and self.__height == other.height(): __lowercase : Dict = [] for i in range(self.__height ): __lowercase : str = [ self.__matrix[i][j] - other.component(UpperCamelCase_ , UpperCamelCase_ ) for j in range(self.__width ) ] matrix.append(UpperCamelCase_ ) return Matrix(UpperCamelCase_ , self.__width , self.__height ) else: raise Exception('''matrices must have the same dimension!''' ) @overload def __mul__( self , UpperCamelCase_ ) -> Matrix: ... @overload def __mul__( self , UpperCamelCase_ ) -> Vector: ... 
def __mul__( self , UpperCamelCase_ ) -> Vector | Matrix: if isinstance(UpperCamelCase_ , UpperCamelCase_ ): # matrix-vector if len(UpperCamelCase_ ) == self.__width: __lowercase : str = zero_vector(self.__height ) for i in range(self.__height ): __lowercase : List[Any] = [ self.__matrix[i][j] * other.component(UpperCamelCase_ ) for j in range(self.__width ) ] ans.change_component(UpperCamelCase_ , sum(UpperCamelCase_ ) ) return ans else: raise Exception( '''vector must have the same size as the ''' '''number of columns of the matrix!''' ) elif isinstance(UpperCamelCase_ , (int, float) ): # matrix-scalar __lowercase : Any = [ [self.__matrix[i][j] * other for j in range(self.__width )] for i in range(self.__height ) ] return Matrix(UpperCamelCase_ , self.__width , self.__height ) return None def _lowerCamelCase ( self ) -> int: return self.__height def _lowerCamelCase ( self ) -> int: return self.__width def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> float: if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception('''change_component: indices out of bounds''' ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> None: if 0 <= x < self.__height and 0 <= y < self.__width: __lowercase : int = value else: raise Exception('''change_component: indices out of bounds''' ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> float: if self.__height != self.__width: raise Exception('''Matrix is not square''' ) __lowercase : Any = self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(UpperCamelCase_ ) ): __lowercase : List[str] = minor[i][:y] + minor[i][y + 1 :] return Matrix(UpperCamelCase_ , self.__width - 1 , self.__height - 1 ).determinant() def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> float: if self.__height != self.__width: raise Exception('''Matrix is not square''' ) if 0 <= x < self.__height and 0 <= y < self.__width: return 
(-1) ** (x + y) * self.minor(UpperCamelCase_ , UpperCamelCase_ ) else: raise Exception('''Indices out of bounds''' ) def _lowerCamelCase ( self ) -> float: if self.__height != self.__width: raise Exception('''Matrix is not square''' ) if self.__height < 1: raise Exception('''Matrix has no element''' ) elif self.__height == 1: return self.__matrix[0][0] elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: __lowercase : str = [ self.__matrix[0][y] * self.cofactor(0 , UpperCamelCase_ ) for y in range(self.__width ) ] return sum(UpperCamelCase_ ) def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : list[list[float]] = [[0] * n for _ in range(__UpperCamelCase )] return Matrix(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): random.seed(__UpperCamelCase ) __lowercase : list[list[float]] = [ [random.randint(__UpperCamelCase , __UpperCamelCase ) for _ in range(__UpperCamelCase )] for _ in range(__UpperCamelCase ) ] return Matrix(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
76
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( __UpperCamelCase = 4 ): __lowercase : Dict = abs(__UpperCamelCase ) or 4 return [[1 + x + y * row_size for x in range(__UpperCamelCase )] for y in range(__UpperCamelCase )] def __UpperCAmelCase ( __UpperCamelCase ): return reverse_row(transpose(__UpperCamelCase ) ) # OR.. transpose(reverse_column(matrix)) def __UpperCAmelCase ( __UpperCamelCase ): return reverse_row(reverse_column(__UpperCamelCase ) ) # OR.. reverse_column(reverse_row(matrix)) def __UpperCAmelCase ( __UpperCamelCase ): return reverse_column(transpose(__UpperCamelCase ) ) # OR.. transpose(reverse_row(matrix)) def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Dict = [list(__UpperCamelCase ) for x in zip(*__UpperCamelCase )] return matrix def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Union[str, Any] = matrix[::-1] return matrix def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Dict = [x[::-1] for x in matrix] return matrix def __UpperCAmelCase ( __UpperCamelCase ): for i in matrix: print(*__UpperCamelCase ) if __name__ == "__main__": a_ = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 90 counterclockwise:\n') print_matrix(rotate_aa(matrix)) a_ = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 180:\n') print_matrix(rotate_aaa(matrix)) a_ = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 270 counterclockwise:\n') print_matrix(rotate_aaa(matrix))
76
1
"""simple docstring""" import fire from utils import calculate_rouge, save_json def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , **__UpperCamelCase ): __lowercase : Tuple = [x.strip() for x in open(__UpperCamelCase ).readlines()] __lowercase : Dict = [x.strip() for x in open(__UpperCamelCase ).readlines()][: len(__UpperCamelCase )] __lowercase : Union[str, Any] = calculate_rouge(__UpperCamelCase , __UpperCamelCase , **__UpperCamelCase ) if save_path is not None: save_json(__UpperCamelCase , __UpperCamelCase , indent=__UpperCamelCase ) return metrics # these print nicely if __name__ == "__main__": fire.Fire(calculate_rouge_path)
76
"""simple docstring""" import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer a_ = logging.get_logger(__name__) a_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} a_ = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } a_ = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } a_ = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } a_ = { 'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2, 'facebook/dpr-ctx_encoder-multiset-base': 5_1_2, } a_ = { 'facebook/dpr-question_encoder-single-nq-base': 5_1_2, 'facebook/dpr-question_encoder-multiset-base': 5_1_2, } a_ = { 'facebook/dpr-reader-single-nq-base': 5_1_2, 'facebook/dpr-reader-multiset-base': 5_1_2, } a_ = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } a_ = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } a_ = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION a_ = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text'] ) a_ = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) a_ = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the 
tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. 
This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(snake_case ) class UpperCAmelCase_ : def __call__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> BatchEncoding: if titles is None and texts is None: return super().__call__( UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) elif titles is None or texts is None: __lowercase : int = titles if texts is None else texts return super().__call__( UpperCamelCase_ , UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) __lowercase : Optional[int] = titles if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [titles] __lowercase : Optional[int] = texts if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [texts] __lowercase : str = len(UpperCamelCase_ ) __lowercase : List[Any] = questions if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [questions] * n_passages if len(UpperCamelCase_ ) != len(UpperCamelCase_ ): raise ValueError( F"""There should be as many titles than texts but got {len(UpperCamelCase_ )} titles and {len(UpperCamelCase_ )} texts.""" ) __lowercase : int = super().__call__(UpperCamelCase_ , UpperCamelCase_ , 
padding=UpperCamelCase_ , truncation=UpperCamelCase_ )['''input_ids'''] __lowercase : List[Any] = super().__call__(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ )['''input_ids'''] __lowercase : Optional[Any] = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(UpperCamelCase_ , UpperCamelCase_ ) ] } if return_attention_mask is not False: __lowercase : str = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __lowercase : List[str] = attention_mask return self.pad(UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 16 , UpperCamelCase_ = 64 , UpperCamelCase_ = 4 , ) -> List[DPRSpanPrediction]: __lowercase : List[Any] = reader_input['''input_ids'''] __lowercase ,__lowercase ,__lowercase : List[str] = reader_output[:3] __lowercase : Optional[int] = len(UpperCamelCase_ ) __lowercase : Any = sorted(range(UpperCamelCase_ ) , reverse=UpperCamelCase_ , key=relevance_logits.__getitem__ ) __lowercase : List[DPRReaderOutput] = [] for doc_id in sorted_docs: __lowercase : Any = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __lowercase : Tuple = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __lowercase : Optional[Any] = sequence_ids.index(self.pad_token_id ) else: __lowercase : List[Any] = len(UpperCamelCase_ ) __lowercase : List[str] = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCamelCase_ , 
top_spans=UpperCamelCase_ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCamelCase_ , start_index=UpperCamelCase_ , end_index=UpperCamelCase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(UpperCamelCase_ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> List[DPRSpanPrediction]: __lowercase : Tuple = [] for start_index, start_score in enumerate(UpperCamelCase_ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __lowercase : int = sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : x[1] , reverse=UpperCamelCase_ ) __lowercase : Optional[Any] = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" ) __lowercase : Any = end_index - start_index + 1 if length > max_answer_length: raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(UpperCamelCase_ ) == top_spans: break return chosen_span_intervals @add_end_docstrings(snake_case ) class UpperCAmelCase_ ( snake_case , snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =READER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase 
=READER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase =["input_ids", "attention_mask"]
76
1
"""simple docstring""" from collections.abc import Callable def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : float = a __lowercase : float = b if function(__UpperCamelCase ) == 0: # one of the a or b is a root for the function return a elif function(__UpperCamelCase ) == 0: return b elif ( function(__UpperCamelCase ) * function(__UpperCamelCase ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError('''could not find root in given interval.''' ) else: __lowercase : float = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(__UpperCamelCase ) == 0: return mid elif function(__UpperCamelCase ) * function(__UpperCamelCase ) < 0: __lowercase : str = mid else: __lowercase : Optional[Any] = mid __lowercase : List[str] = start + (end - start) / 2.0 return mid def __UpperCAmelCase ( __UpperCamelCase ): return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1_0_0_0)) import doctest doctest.testmod()
76
"""simple docstring""" import warnings from ...utils import logging from .image_processing_glpn import GLPNImageProcessor a_ = logging.get_logger(__name__) class UpperCAmelCase_ ( snake_case ): def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> None: warnings.warn( '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use GLPNImageProcessor instead.''' , UpperCamelCase_ , ) super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
76
1
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json', # See all SEW models at https://huggingface.co/models?filter=sew } class UpperCAmelCase_ ( snake_case ): UpperCamelCase ="sew" def __init__( self , UpperCamelCase_=32 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_=2 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-5 , UpperCamelCase_="group" , UpperCamelCase_="gelu" , UpperCamelCase_=(64, 1_28, 1_28, 1_28, 1_28, 2_56, 2_56, 2_56, 2_56, 5_12, 5_12, 5_12, 5_12) , UpperCamelCase_=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCamelCase_=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCamelCase_=False , UpperCamelCase_=1_28 , UpperCamelCase_=16 , UpperCamelCase_=True , UpperCamelCase_=0.0_5 , UpperCamelCase_=10 , UpperCamelCase_=2 , UpperCamelCase_=0.0 , UpperCamelCase_=10 , UpperCamelCase_=0 , UpperCamelCase_="mean" , UpperCamelCase_=False , UpperCamelCase_=False , UpperCamelCase_=2_56 , UpperCamelCase_=0 , UpperCamelCase_=1 , UpperCamelCase_=2 , **UpperCamelCase_ , ) -> Dict: super().__init__(**UpperCamelCase_ , pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ ) __lowercase : Any = hidden_size __lowercase : str = feat_extract_norm __lowercase : List[str] = feat_extract_activation __lowercase : Optional[int] = list(UpperCamelCase_ ) __lowercase : Optional[Any] = list(UpperCamelCase_ ) __lowercase : Tuple = list(UpperCamelCase_ ) __lowercase : List[Any] = conv_bias __lowercase : Dict = num_conv_pos_embeddings __lowercase : Optional[int] = num_conv_pos_embedding_groups __lowercase : str = 
len(self.conv_dim ) __lowercase : Union[str, Any] = num_hidden_layers __lowercase : List[Any] = intermediate_size __lowercase : List[Any] = squeeze_factor __lowercase : int = hidden_act __lowercase : Optional[Any] = num_attention_heads __lowercase : List[Any] = hidden_dropout __lowercase : int = attention_dropout __lowercase : str = activation_dropout __lowercase : Union[str, Any] = feat_proj_dropout __lowercase : Tuple = final_dropout __lowercase : Union[str, Any] = layerdrop __lowercase : Dict = layer_norm_eps __lowercase : Tuple = initializer_range __lowercase : Union[str, Any] = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( '''Configuration for convolutional layers is incorrect.''' '''It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,''' F"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)""" F"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __lowercase : Any = apply_spec_augment __lowercase : Optional[int] = mask_time_prob __lowercase : int = mask_time_length __lowercase : Dict = mask_time_min_masks __lowercase : Any = mask_feature_prob __lowercase : Tuple = mask_feature_length __lowercase : List[str] = mask_feature_min_masks # ctc loss __lowercase : int = ctc_loss_reduction __lowercase : str = ctc_zero_infinity # sequence classification __lowercase : str = use_weighted_layer_sum __lowercase : Tuple = classifier_proj_size @property def _lowerCamelCase ( self ) -> str: return functools.reduce(operator.mul , self.conv_stride , 1 )
76
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def __UpperCAmelCase ( __UpperCamelCase ): # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): __lowercase : Any = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue __lowercase : Dict = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' ) __lowercase : Dict = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' ) __lowercase : Dict = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' ) __lowercase : Tuple = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' ) __lowercase : Dict = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' ) __lowercase : Optional[int] = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' ) __lowercase : Optional[int] = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' ) __lowercase : Union[str, Any] = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' ) __lowercase : str = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' ) __lowercase : Dict = key.replace('''image_encoder.module''' , '''flava.image_model''' ) __lowercase : str = key.replace('''text_encoder.module''' , '''flava.text_model''' ) __lowercase : Dict = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' ) __lowercase : Union[str, Any] = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' ) __lowercase : List[str] = key.replace('''text_projection''' , '''flava.text_projection''' 
) __lowercase : Any = key.replace('''image_projection''' , '''flava.image_projection''' ) __lowercase : Tuple = value.float() for key, value in codebook_state_dict.items(): __lowercase : int = value return upgrade @torch.no_grad() def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None ): if config_path is not None: __lowercase : Union[str, Any] = FlavaConfig.from_pretrained(__UpperCamelCase ) else: __lowercase : Union[str, Any] = FlavaConfig() __lowercase : Any = FlavaForPreTraining(__UpperCamelCase ).eval() __lowercase : Any = convert_dalle_checkpoint(__UpperCamelCase , __UpperCamelCase , save_checkpoint=__UpperCamelCase ) if os.path.exists(__UpperCamelCase ): __lowercase : Optional[Any] = torch.load(__UpperCamelCase , map_location='''cpu''' ) else: __lowercase : List[Any] = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location='''cpu''' ) __lowercase : Optional[int] = upgrade_state_dict(__UpperCamelCase , __UpperCamelCase ) hf_model.load_state_dict(__UpperCamelCase ) __lowercase : Union[str, Any] = hf_model.state_dict() __lowercase : Optional[Any] = count_parameters(__UpperCamelCase ) __lowercase : List[Any] = count_parameters(__UpperCamelCase ) + count_parameters(__UpperCamelCase ) assert torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) hf_model.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') a_ = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, 
args.config_path)
76
1
"""simple docstring""" import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() a_ = logging.get_logger(__name__) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : str = WavaVecaForSequenceClassification.from_pretrained(__UpperCamelCase , config=__UpperCamelCase ) __lowercase : Any = downstream_dict['''projector.weight'''] __lowercase : Dict = downstream_dict['''projector.bias'''] __lowercase : str = downstream_dict['''model.post_net.linear.weight'''] __lowercase : Any = downstream_dict['''model.post_net.linear.bias'''] return model def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : str = WavaVecaForAudioFrameClassification.from_pretrained(__UpperCamelCase , config=__UpperCamelCase ) __lowercase : Optional[int] = downstream_dict['''model.linear.weight'''] __lowercase : Dict = downstream_dict['''model.linear.bias'''] return model def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : int = WavaVecaForXVector.from_pretrained(__UpperCamelCase , config=__UpperCamelCase ) __lowercase : Optional[int] = downstream_dict['''connector.weight'''] __lowercase : List[str] = downstream_dict['''connector.bias'''] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): __lowercase : Dict = downstream_dict[ f"""model.framelevel_feature_extractor.module.{i}.kernel.weight""" ] __lowercase : Optional[Any] = downstream_dict[f"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""] __lowercase : str = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight'''] __lowercase : int = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias'''] __lowercase : str = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight'''] __lowercase : 
Tuple = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias'''] __lowercase : Optional[int] = downstream_dict['''objective.W'''] return model @torch.no_grad() def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : int = torch.load(__UpperCamelCase , map_location='''cpu''' ) __lowercase : Union[str, Any] = checkpoint['''Downstream'''] __lowercase : Tuple = WavaVecaConfig.from_pretrained(__UpperCamelCase ) __lowercase : str = WavaVecaFeatureExtractor.from_pretrained( __UpperCamelCase , return_attention_mask=__UpperCamelCase , do_normalize=__UpperCamelCase ) __lowercase : List[Any] = hf_config.architectures[0] if arch.endswith('''ForSequenceClassification''' ): __lowercase : str = convert_classification(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) elif arch.endswith('''ForAudioFrameClassification''' ): __lowercase : int = convert_diarization(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) elif arch.endswith('''ForXVector''' ): __lowercase : Any = convert_xvector(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) else: raise NotImplementedError(f"""S3PRL weights conversion is not supported for {arch}""" ) if hf_config.use_weighted_layer_sum: __lowercase : Tuple = checkpoint['''Featurizer''']['''weights'''] hf_feature_extractor.save_pretrained(__UpperCamelCase ) hf_model.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument( '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.' 
) parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') a_ = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
76
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging a_ = logging.get_logger(__name__) class UpperCAmelCase_ ( snake_case ): UpperCamelCase =["pixel_values"] def __init__( self , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = PILImageResampling.BILINEAR , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = 1 / 2_55 , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> None: super().__init__(**UpperCamelCase_ ) __lowercase : List[str] = size if size is not None else {'''shortest_edge''': 2_56} __lowercase : Dict = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) __lowercase : Optional[Any] = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} __lowercase : Dict = get_size_dict(UpperCamelCase_ ) __lowercase : Dict = do_resize __lowercase : Optional[Any] = size __lowercase : List[Any] = resample __lowercase : Dict = do_center_crop __lowercase : Any = crop_size __lowercase : List[str] = do_rescale __lowercase : List[str] = rescale_factor __lowercase : Optional[Any] = do_normalize __lowercase : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __lowercase : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = PILImageResampling.BICUBIC , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray: __lowercase : List[Any] = 
get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) __lowercase : List[Any] = get_resize_output_image_size(UpperCamelCase_ , size=size['''shortest_edge'''] , default_to_square=UpperCamelCase_ ) return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray: __lowercase : Union[str, Any] = get_size_dict(UpperCamelCase_ ) return center_crop(UpperCamelCase_ , size=(size['''height'''], size['''width''']) , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ ) -> np.ndarray: return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray: return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = ChannelDimension.FIRST , **UpperCamelCase_ , ) -> Optional[Any]: __lowercase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize __lowercase : Tuple = size if size is not None else self.size __lowercase : Optional[Any] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) __lowercase : int = resample if 
resample is not None else self.resample __lowercase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop __lowercase : List[str] = crop_size if crop_size is not None else self.crop_size __lowercase : List[str] = get_size_dict(UpperCamelCase_ ) __lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __lowercase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor __lowercase : Dict = do_normalize if do_normalize is not None else self.do_normalize __lowercase : Tuple = image_mean if image_mean is not None else self.image_mean __lowercase : Any = image_std if image_std is not None else self.image_std __lowercase : Any = make_list_of_images(UpperCamelCase_ ) if not valid_images(UpperCamelCase_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
__lowercase : Optional[int] = [to_numpy_array(UpperCamelCase_ ) for image in images] if do_resize: __lowercase : Tuple = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images] if do_center_crop: __lowercase : Any = [self.center_crop(image=UpperCamelCase_ , size=UpperCamelCase_ ) for image in images] if do_rescale: __lowercase : str = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images] if do_normalize: __lowercase : Optional[int] = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images] __lowercase : str = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images] __lowercase : Optional[Any] = {'''pixel_values''': images} return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
76
1
"""simple docstring""" a_ = 'Input must be a string of 8 numbers plus letter' a_ = 'TRWAGMYFPDXBNJZSQVHLCKE' def __UpperCAmelCase ( __UpperCamelCase ): if not isinstance(__UpperCamelCase , __UpperCamelCase ): __lowercase : str = f"""Expected string as input, found {type(__UpperCamelCase ).__name__}""" raise TypeError(__UpperCamelCase ) __lowercase : int = spanish_id.replace('''-''' , '''''' ).upper() if len(__UpperCamelCase ) != 9: raise ValueError(__UpperCamelCase ) try: __lowercase : Tuple = int(spanish_id_clean[0:8] ) __lowercase : Optional[int] = spanish_id_clean[8] except ValueError as ex: raise ValueError(__UpperCamelCase ) from ex if letter.isdigit(): raise ValueError(__UpperCamelCase ) return letter == LOOKUP_LETTERS[number % 23] if __name__ == "__main__": import doctest doctest.testmod()
76
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if digit_amount > 0: return round(number - int(__UpperCamelCase ) , __UpperCamelCase ) return number - int(__UpperCamelCase ) if __name__ == "__main__": print(decimal_isolate(1.53, 0)) print(decimal_isolate(35.345, 1)) print(decimal_isolate(35.345, 2)) print(decimal_isolate(35.345, 3)) print(decimal_isolate(-14.789, 3)) print(decimal_isolate(0, 2)) print(decimal_isolate(-14.123, 1)) print(decimal_isolate(-14.123, 2)) print(decimal_isolate(-14.123, 3))
76
1
"""simple docstring""" import argparse from collections import defaultdict import yaml a_ = 'docs/source/en/_toctree.yml' def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : List[Any] = defaultdict(__UpperCamelCase ) __lowercase : List[Any] = [] __lowercase : int = [] for doc in doc_list: if "local" in doc: counts[doc["local"]] += 1 if doc["title"].lower() == "overview": overview_doc.append({'''local''': doc['''local'''], '''title''': doc['''title''']} ) else: new_doc_list.append(__UpperCamelCase ) __lowercase : int = new_doc_list __lowercase : Any = [key for key, value in counts.items() if value > 1] __lowercase : int = [] for duplicate_key in duplicates: __lowercase : Optional[Any] = list({doc['''title'''] for doc in doc_list if doc['''local'''] == duplicate_key} ) if len(__UpperCamelCase ) > 1: raise ValueError( f"""{duplicate_key} is present several times in the documentation table of content at """ '''`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the ''' '''others.''' ) # Only add this once new_doc.append({'''local''': duplicate_key, '''title''': titles[0]} ) # Add none duplicate-keys new_doc.extend([doc for doc in doc_list if '''local''' not in counts or counts[doc['''local''']] == 1] ) __lowercase : Tuple = sorted(__UpperCamelCase , key=lambda __UpperCamelCase : s["title"].lower() ) # "overview" gets special treatment and is always first if len(__UpperCamelCase ) > 1: raise ValueError('''{doc_list} has two \'overview\' docs which is not allowed.''' ) overview_doc.extend(__UpperCamelCase ) # Sort return overview_doc def __UpperCAmelCase ( __UpperCamelCase=False ): with open(__UpperCamelCase , encoding='''utf-8''' ) as f: __lowercase : List[str] = yaml.safe_load(f.read() ) # Get to the API doc __lowercase : Dict = 0 while content[api_idx]["title"] != "API": api_idx += 1 __lowercase : Union[str, Any] = content[api_idx]['''sections'''] # Then to the model doc __lowercase : List[str] = 0 while 
api_doc[scheduler_idx]["title"] != "Schedulers": scheduler_idx += 1 __lowercase : str = api_doc[scheduler_idx]['''sections'''] __lowercase : Tuple = clean_doc_toc(__UpperCamelCase ) __lowercase : Dict = False if new_scheduler_doc != scheduler_doc: __lowercase : Dict = True if overwrite: __lowercase : Optional[Any] = new_scheduler_doc if diff: if overwrite: __lowercase : Tuple = api_doc with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(yaml.dump(__UpperCamelCase , allow_unicode=__UpperCamelCase ) ) else: raise ValueError( '''The model doc part of the table of content is not properly sorted, run `make style` to fix this.''' ) def __UpperCAmelCase ( __UpperCamelCase=False ): with open(__UpperCamelCase , encoding='''utf-8''' ) as f: __lowercase : Optional[Any] = yaml.safe_load(f.read() ) # Get to the API doc __lowercase : int = 0 while content[api_idx]["title"] != "API": api_idx += 1 __lowercase : Dict = content[api_idx]['''sections'''] # Then to the model doc __lowercase : int = 0 while api_doc[pipeline_idx]["title"] != "Pipelines": pipeline_idx += 1 __lowercase : Optional[int] = False __lowercase : List[Any] = api_doc[pipeline_idx]['''sections'''] __lowercase : List[Any] = [] # sort sub pipeline docs for pipeline_doc in pipeline_docs: if "section" in pipeline_doc: __lowercase : Tuple = pipeline_doc['''section'''] __lowercase : List[Any] = clean_doc_toc(__UpperCamelCase ) if overwrite: __lowercase : Dict = new_sub_pipeline_doc new_pipeline_docs.append(__UpperCamelCase ) # sort overall pipeline doc __lowercase : Union[str, Any] = clean_doc_toc(__UpperCamelCase ) if new_pipeline_docs != pipeline_docs: __lowercase : List[Any] = True if overwrite: __lowercase : Optional[int] = new_pipeline_docs if diff: if overwrite: __lowercase : Union[str, Any] = api_doc with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as f: f.write(yaml.dump(__UpperCamelCase , allow_unicode=__UpperCamelCase ) ) else: raise ValueError( '''The model doc part of the 
table of content is not properly sorted, run `make style` to fix this.''' ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') a_ = parser.parse_args() check_scheduler_doc(args.fix_and_overwrite) check_pipeline_doc(args.fix_and_overwrite)
76
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : set[int] = set() # To detect a back edge, keep track of vertices currently in the recursion stack __lowercase : set[int] = set() return any( node not in visited and depth_first_search(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) for node in graph ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): visited.add(__UpperCamelCase ) rec_stk.add(__UpperCamelCase ) for node in graph[vertex]: if node not in visited: if depth_first_search(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(__UpperCamelCase ) return False if __name__ == "__main__": from doctest import testmod testmod()
76
1
"""Project Euler problem 1: sum of all multiples of 3 or 5 below a limit."""

# Export the obfuscated entry point explicitly so `from module import *` works.
__all__ = ["__UpperCAmelCase"]


def __UpperCAmelCase(__UpperCamelCase=1000):
    """Return the sum of every natural number below ``__UpperCamelCase``
    that is a multiple of 3 or 5.

    Replaces the O(n) generator scan with the O(1) closed form: sum the two
    arithmetic progressions and subtract multiples of 15 (inclusion-exclusion),
    which were counted twice.
    """
    limit = __UpperCamelCase
    if limit <= 0:
        # An empty range sums to 0, matching the previous iterative behaviour.
        return 0

    def _progression_sum(step):
        # Sum of step, 2*step, ... strictly below ``limit``.
        terms = (limit - 1) // step
        return step * terms * (terms + 1) // 2

    return _progression_sum(3) + _progression_sum(5) - _progression_sum(15)


if __name__ == "__main__":
    # Fix: the original printed `solution()`, a name that no longer existed
    # after obfuscation renamed the function.
    print(f"{__UpperCAmelCase() = }")
76
"""simple docstring""" import logging import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEncoder, BertModel, BertPreTrainedModel, ) a_ = logging.getLogger(__name__) class UpperCAmelCase_ ( snake_case ): def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None ) -> Optional[Any]: __lowercase : Tuple = self.layer[current_layer](UpperCamelCase_ , UpperCamelCase_ , head_mask[current_layer] ) __lowercase : Any = layer_outputs[0] return hidden_states @add_start_docstrings( "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , snake_case , ) class UpperCAmelCase_ ( snake_case ): def __init__( self , UpperCamelCase_ ) -> int: super().__init__(UpperCamelCase_ ) __lowercase : Optional[Any] = BertEncoderWithPabee(UpperCamelCase_ ) self.init_weights() __lowercase : str = 0 __lowercase : Optional[Any] = 0 __lowercase : Optional[int] = 0 __lowercase : int = 0 def _lowerCamelCase ( self , UpperCamelCase_ ) -> Dict: __lowercase : Tuple = threshold def _lowerCamelCase ( self , UpperCamelCase_ ) -> Union[str, Any]: __lowercase : Optional[int] = patience def _lowerCamelCase ( self ) -> List[str]: __lowercase : Tuple = 0 __lowercase : Tuple = 0 def _lowerCamelCase ( self ) -> List[Any]: __lowercase : Optional[int] = self.inference_layers_num / self.inference_instances_num __lowercase : int = ( F"""*** Patience = {self.patience} Avg. 
Inference Layers = {avg_inf_layers:.2f} Speed Up =""" F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***""" ) print(UpperCamelCase_ ) @add_start_docstrings_to_model_forward(UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=False , ) -> Union[str, Any]: if input_ids is not None and inputs_embeds is not None: raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' ) elif input_ids is not None: __lowercase : Tuple = input_ids.size() elif inputs_embeds is not None: __lowercase : List[Any] = inputs_embeds.size()[:-1] else: raise ValueError('''You have to specify either input_ids or inputs_embeds''' ) __lowercase : int = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: __lowercase : Dict = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ ) if token_type_ids is None: __lowercase : int = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
__lowercase : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: __lowercase ,__lowercase ,__lowercase : Optional[int] = encoder_hidden_states.size() __lowercase : Any = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: __lowercase : List[str] = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ ) __lowercase : Tuple = self.invert_attention_mask(UpperCamelCase_ ) else: __lowercase : Tuple = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] __lowercase : Optional[int] = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers ) __lowercase : Optional[int] = self.embeddings( input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ ) __lowercase : Union[str, Any] = embedding_output if self.training: __lowercase : List[Any] = [] for i in range(self.config.num_hidden_layers ): __lowercase : str = self.encoder.adaptive_forward( UpperCamelCase_ , current_layer=UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ ) __lowercase : int = self.pooler(UpperCamelCase_ ) __lowercase : str = output_layers[i](output_dropout(UpperCamelCase_ ) ) res.append(UpperCamelCase_ ) elif self.patience == 0: # Use all layers for inference __lowercase : int = self.encoder( UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , ) __lowercase : 
Optional[Any] = self.pooler(encoder_outputs[0] ) __lowercase : int = [output_layers[self.config.num_hidden_layers - 1](UpperCamelCase_ )] else: __lowercase : Optional[int] = 0 __lowercase : Union[str, Any] = None __lowercase : int = 0 for i in range(self.config.num_hidden_layers ): calculated_layer_num += 1 __lowercase : Tuple = self.encoder.adaptive_forward( UpperCamelCase_ , current_layer=UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ ) __lowercase : Dict = self.pooler(UpperCamelCase_ ) __lowercase : Optional[int] = output_layers[i](UpperCamelCase_ ) if regression: __lowercase : Any = logits.detach() if patient_result is not None: __lowercase : List[str] = patient_result.detach() if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold: patient_counter += 1 else: __lowercase : int = 0 else: __lowercase : List[str] = logits.detach().argmax(dim=1 ) if patient_result is not None: __lowercase : Optional[Any] = patient_result.detach().argmax(dim=1 ) if (patient_result is not None) and torch.all(labels.eq(UpperCamelCase_ ) ): patient_counter += 1 else: __lowercase : Tuple = 0 __lowercase : Union[str, Any] = logits if patient_counter == self.patience: break __lowercase : Optional[int] = [patient_result] self.inference_layers_num += calculated_layer_num self.inference_instances_num += 1 return res @add_start_docstrings( "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. 
" , snake_case , ) class UpperCAmelCase_ ( snake_case ): def __init__( self , UpperCamelCase_ ) -> Optional[Any]: super().__init__(UpperCamelCase_ ) __lowercase : List[Any] = config.num_labels __lowercase : int = BertModelWithPabee(UpperCamelCase_ ) __lowercase : int = nn.Dropout(config.hidden_dropout_prob ) __lowercase : Union[str, Any] = nn.ModuleList( [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] ) self.init_weights() @add_start_docstrings_to_model_forward(UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , ) -> int: __lowercase : Union[str, Any] = self.bert( input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , ) __lowercase : List[str] = (logits[-1],) if labels is not None: __lowercase : Any = None __lowercase : Optional[int] = 0 for ix, logits_item in enumerate(UpperCamelCase_ ): if self.num_labels == 1: # We are doing regression __lowercase : Any = MSELoss() __lowercase : Any = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) ) else: __lowercase : str = CrossEntropyLoss() __lowercase : Dict = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) ) if total_loss is None: __lowercase : List[str] = loss else: total_loss += loss * (ix + 1) total_weights += ix + 1 __lowercase : Union[str, Any] = (total_loss / total_weights,) + outputs return outputs
76
1
"""Project Euler problem 25: index of the first Fibonacci term with n digits."""

__all__ = ["fibonacci", "fibonacci_digits_index", "solution", "__UpperCAmelCase"]


def fibonacci(n):
    """Return the n-th Fibonacci number (sequence 0, 1, 1, 2, 3, ...).

    Returns 0 for ``n == 1`` or non-integer input, mirroring the original
    guard clause.
    """
    if n == 1 or not isinstance(n, int):
        return 0
    if n == 2:
        return 1
    # Iteratively build the sequence to avoid recursion-depth limits.
    sequence = [0, 1]
    for i in range(2, n + 1):
        sequence.append(sequence[i - 1] + sequence[i - 2])
    return sequence[n]


def fibonacci_digits_index(n):
    """Return the index of the first Fibonacci number with ``n`` digits."""
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n=1000):
    """Return the index of the first Fibonacci term containing ``n`` digits.

    Fix: all three defs had been renamed to one obfuscated name, so the calls
    to ``fibonacci``, ``fibonacci_digits_index`` and ``solution`` raised
    NameError; the original names used at the call sites are restored.
    """
    return fibonacci_digits_index(n)


# Preserve the module's previous public binding: the shared obfuscated name
# ended up pointing at the entry point (last def wins).
__UpperCAmelCase = solution


if __name__ == "__main__":
    print(solution(int(str(input()).strip())))
76
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() a_ = logging.get_logger(__name__) a_ = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): for attribute in key.split('''.''' ): __lowercase : str = getattr(__UpperCamelCase , __UpperCamelCase ) if weight_type is not None: __lowercase : int = getattr(__UpperCamelCase , __UpperCamelCase ).shape else: __lowercase : int = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowercase : List[str] = value elif weight_type == "weight_g": __lowercase : Optional[Any] = value elif weight_type == "weight_v": __lowercase : Tuple = value elif weight_type == "bias": __lowercase : Dict = value else: __lowercase : Union[str, Any] = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : Tuple = [] __lowercase : Union[str, Any] = fairseq_model.state_dict() __lowercase : Optional[Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): __lowercase : Union[str, Any] = False if "conv_layers" in name: load_conv_layer( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , hf_model.config.feat_extract_norm == '''group''' , ) __lowercase : List[str] = True else: for key, mapped_key in MAPPING.items(): __lowercase : List[str] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned): __lowercase : int = True if "*" in mapped_key: __lowercase : Union[str, Any] = name.split(__UpperCamelCase )[0].split('''.''' )[-2] __lowercase : Tuple = mapped_key.replace('''*''' , __UpperCamelCase ) if "weight_g" in name: __lowercase : Tuple = '''weight_g''' elif "weight_v" in name: __lowercase : Optional[int] = '''weight_v''' elif "weight" in name: __lowercase : str = '''weight''' elif "bias" in name: __lowercase : Optional[int] = '''bias''' else: __lowercase : List[str] = None set_recursively(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) continue if not is_used: unused_weights.append(__UpperCamelCase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : List[Any] = full_name.split('''conv_layers.''' )[-1] __lowercase : str = name.split('''.''' ) __lowercase : Dict = int(items[0] ) __lowercase : Any = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == 
feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowercase : List[str] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowercase : Tuple = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) __lowercase : Union[str, Any] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowercase : Tuple = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__UpperCamelCase ) @torch.no_grad() def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=True ): if config_path is not None: __lowercase : Dict = HubertConfig.from_pretrained(__UpperCamelCase ) else: __lowercase : str = HubertConfig() if is_finetuned: if dict_path: __lowercase : Tuple = Dictionary.load(__UpperCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __lowercase : int = target_dict.pad_index __lowercase : Union[str, Any] = target_dict.bos_index __lowercase : int = target_dict.eos_index __lowercase : int = len(target_dict.symbols ) __lowercase : Dict = os.path.join(__UpperCamelCase , '''vocab.json''' ) if not os.path.isdir(__UpperCamelCase ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__UpperCamelCase ) ) return os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , __UpperCamelCase ) __lowercase : str = WavaVecaCTCTokenizer( __UpperCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__UpperCamelCase , ) __lowercase : str = True if config.feat_extract_norm == '''layer''' else False __lowercase : Any = 
WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__UpperCamelCase , return_attention_mask=__UpperCamelCase , ) __lowercase : Union[str, Any] = WavaVecaProcessor(feature_extractor=__UpperCamelCase , tokenizer=__UpperCamelCase ) processor.save_pretrained(__UpperCamelCase ) __lowercase : Optional[Any] = HubertForCTC(__UpperCamelCase ) else: __lowercase : Union[str, Any] = HubertModel(__UpperCamelCase ) if is_finetuned: __lowercase ,__lowercase ,__lowercase : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __lowercase ,__lowercase ,__lowercase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) __lowercase : Union[str, Any] = model[0].eval() recursively_load_weights(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) hf_wavavec.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) a_ = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
76
1
"""torch.hub entry point: thin wrappers around the transformers Auto* factories."""
import os
import sys

# Make the in-repository ``src`` layout importable before transformers is
# imported below.  Fix: the path was bound to an obfuscated name while the
# append still referenced the undefined ``SRC_DIR`` (NameError at import).
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)

# Packages torch.hub installs before loading this entry point.
dependencies = [
    'torch',
    'numpy',
    'tokenizers',
    'filelock',
    'requests',
    'tqdm',
    'regex',
    'sentencepiece',
    'sacremoses',
    'importlib_metadata',
    'huggingface_hub',
]
# Preserve the previous (obfuscated) module binding for backward compatibility.
a_ = dependencies

# Fix: every wrapper below used `*__UpperCamelCase, **__UpperCamelCase`
# (a duplicate-argument SyntaxError) and they all shared one name, so only
# the last def would have survived.  Restore distinct hub entry names.


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    # e.g. config('bert-base-uncased')
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
76
"""simple docstring""" a_ = { 'Pillow': 'Pillow<10.0.0', 'accelerate': 'accelerate>=0.20.3', 'av': 'av==9.2.0', 'beautifulsoup4': 'beautifulsoup4', 'black': 'black~=23.1', 'codecarbon': 'codecarbon==1.2.0', 'cookiecutter': 'cookiecutter==1.7.3', 'dataclasses': 'dataclasses', 'datasets': 'datasets!=2.5.0', 'decord': 'decord==0.6.0', 'deepspeed': 'deepspeed>=0.9.3', 'diffusers': 'diffusers', 'dill': 'dill<0.3.5', 'evaluate': 'evaluate>=0.2.0', 'fairscale': 'fairscale>0.3', 'faiss-cpu': 'faiss-cpu', 'fastapi': 'fastapi', 'filelock': 'filelock', 'flax': 'flax>=0.4.1,<=0.7.0', 'ftfy': 'ftfy', 'fugashi': 'fugashi>=1.0', 'GitPython': 'GitPython<3.1.19', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0', 'importlib_metadata': 'importlib_metadata', 'ipadic': 'ipadic>=1.0.0,<2.0', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13', 'jaxlib': 'jaxlib>=0.1.65,<=0.4.13', 'jieba': 'jieba', 'kenlm': 'kenlm', 'keras-nlp': 'keras-nlp>=0.3.1', 'librosa': 'librosa', 'nltk': 'nltk', 'natten': 'natten>=0.14.6', 'numpy': 'numpy>=1.17', 'onnxconverter-common': 'onnxconverter-common', 'onnxruntime-tools': 'onnxruntime-tools>=1.4.2', 'onnxruntime': 'onnxruntime>=1.4.0', 'opencv-python': 'opencv-python', 'optuna': 'optuna', 'optax': 'optax>=0.0.8,<=0.1.4', 'packaging': 'packaging>=20.0', 'parameterized': 'parameterized', 'phonemizer': 'phonemizer', 'protobuf': 'protobuf', 'psutil': 'psutil', 'pyyaml': 'pyyaml>=5.1', 'pydantic': 'pydantic<2', 'pytest': 'pytest>=7.2.0', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'python': 'python>=3.8.0', 'ray[tune]': 'ray[tune]', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'rhoknp': 'rhoknp>=1.1.0,<1.3.1', 'rjieba': 'rjieba', 'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1', 'ruff': 'ruff>=0.0.241,<=0.0.259', 'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0', 'sacremoses': 'sacremoses', 'safetensors': 'safetensors>=0.3.1', 'sagemaker': 'sagemaker>=2.31.0', 'scikit-learn': 
'scikit-learn', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'sigopt': 'sigopt', 'starlette': 'starlette', 'sudachipy': 'sudachipy>=0.6.6', 'sudachidict_core': 'sudachidict_core>=20220729', 'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14', 'tensorflow': 'tensorflow>=2.6,<2.14', 'tensorflow-text': 'tensorflow-text<2.14', 'tf2onnx': 'tf2onnx', 'timeout-decorator': 'timeout-decorator', 'timm': 'timm', 'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14', 'torch': 'torch>=1.9,!=1.12.0', 'torchaudio': 'torchaudio', 'torchvision': 'torchvision', 'pyctcdecode': 'pyctcdecode>=0.4.0', 'tqdm': 'tqdm>=4.27', 'unidic': 'unidic>=1.0.2', 'unidic_lite': 'unidic_lite>=1.0.7', 'urllib3': 'urllib3<2.0.0', 'uvicorn': 'uvicorn', }
76
1
"""Modular multiplicative inverse via the iterative extended Euclidean algorithm."""

# Export the obfuscated entry point explicitly so `from module import *` works.
__all__ = ["gcd", "__UpperCAmelCase"]


def gcd(a, b):
    """Return the greatest common divisor of ``a`` and ``b`` (Euclid's algorithm)."""
    while a != 0:
        a, b = b % a, a
    return b


def __UpperCAmelCase(a, m):
    """Return the multiplicative inverse of ``a`` modulo ``m``.

    Raises:
        ValueError: if ``a`` and ``m`` are not coprime (no inverse exists).

    Fix: obfuscation had collapsed every state variable to one name and
    duplicated the parameter names, so ``gcd``, ``ua``, ``va`` and ``q`` were
    all unbound; the standard extended-Euclid variables are restored.
    """
    if gcd(a, m) != 1:
        raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist")
    # Invariants: u1*a + u2*m == u3 and v1*a + v2*m == v3.
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        u1, u2, u3, v1, v2, v3 = (
            u1 - q * v1,
            u2 - q * v2,
            u3 - q * v3,
            v1,
            v2,
            v3,
        )
    # u3 is now gcd(a, m) == 1, so u1 is the inverse (reduced into [0, m)).
    return u1 % m
76
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class UpperCAmelCase_ ( snake_case ): UpperCamelCase ="openai/whisper-base" UpperCamelCase =( "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the " "transcribed text." ) UpperCamelCase ="transcriber" UpperCamelCase =WhisperProcessor UpperCamelCase =WhisperForConditionalGeneration UpperCamelCase =["audio"] UpperCamelCase =["text"] def _lowerCamelCase ( self , UpperCamelCase_ ) -> Union[str, Any]: return self.pre_processor(UpperCamelCase_ , return_tensors='''pt''' ).input_features def _lowerCamelCase ( self , UpperCamelCase_ ) -> Optional[Any]: return self.model.generate(inputs=UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[str]: return self.pre_processor.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )[0]
76
1
"""Find the week day for a Gregorian date using the Doomsday algorithm."""

# Export the obfuscated entry point explicitly so `from module import *` works.
__all__ = ["__UpperCAmelCase"]

# Doomsday (anchor) day-of-month for each month, January..December,
# for leap and non-leap years.  Fix: these tables were bound to an
# obfuscated name while the code still referenced the originals.
DOOMSDAY_LEAP = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
DOOMSDAY_NOT_LEAP = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
WEEK_DAY_NAMES = {
    0: 'Sunday',
    1: 'Monday',
    2: 'Tuesday',
    3: 'Wednesday',
    4: 'Thursday',
    5: 'Friday',
    6: 'Saturday',
}
# Preserve the previous (obfuscated) module binding: the dict was its last value.
a_ = WEEK_DAY_NAMES


def __UpperCAmelCase(year, month, day):
    """Return the week-day name for the given Gregorian date.

    >>> __UpperCAmelCase(2022, 1, 1)
    'Saturday'
    >>> __UpperCAmelCase(2000, 1, 1)
    'Saturday'
    """
    assert len(str(year)) > 2, "year should be in YYYY format"
    assert 1 <= month <= 12, "month should be between 1 to 12"
    assert 1 <= day <= 31, "day should be between 1 to 31"
    # Doomsday algorithm: anchor day of the century, then the year's doomsday.
    century = year // 100
    century_anchor = (5 * (century % 4) + 2) % 7
    centurian = year % 100
    centurian_m = centurian % 12
    dooms_day = (
        (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
    ) % 7
    # A year is a leap year iff divisible by 4, except century years, which
    # must be divisible by 400.  Bug fix: the original tested
    # ``year % 400 == 0`` here, which sent leap centuries (2000) to the
    # non-leap table and non-leap centuries (1900) to the leap table.
    day_anchor = (
        DOOMSDAY_NOT_LEAP[month - 1]
        if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
        else DOOMSDAY_LEAP[month - 1]
    )
    week_day = (dooms_day + day - day_anchor) % 7
    return WEEK_DAY_NAMES[week_day]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
76
"""simple docstring""" import gc import threading import time import psutil import torch class UpperCAmelCase_ : def __init__( self ) -> str: __lowercase : List[Any] = psutil.Process() __lowercase : Any = False def _lowerCamelCase ( self ) -> Union[str, Any]: __lowercase : Optional[Any] = -1 while True: __lowercase : List[str] = max(self.process.memory_info().rss , self.cpu_memory_peak ) # can't sleep or will not catch the peak right (this comment is here on purpose) if not self.peak_monitoring: break def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : List[Any] = True __lowercase : List[Any] = threading.Thread(target=self.peak_monitor ) __lowercase : Optional[int] = True self.thread.start() def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : Union[str, Any] = False self.thread.join() return self.cpu_memory_peak a_ = PeakCPUMemory() def __UpperCAmelCase ( ): # Time __lowercase : Union[str, Any] = {'''time''': time.time()} gc.collect() torch.cuda.empty_cache() # CPU mem __lowercase : List[Any] = psutil.Process().memory_info().rss cpu_peak_tracker.start() # GPU mem for i in range(torch.cuda.device_count() ): __lowercase : List[str] = torch.cuda.memory_allocated(__UpperCamelCase ) torch.cuda.reset_peak_memory_stats() return measures def __UpperCAmelCase ( __UpperCamelCase ): # Time __lowercase : List[Any] = {'''time''': time.time() - start_measures['''time''']} gc.collect() torch.cuda.empty_cache() # CPU mem __lowercase : Union[str, Any] = (psutil.Process().memory_info().rss - start_measures['''cpu''']) / 2**20 __lowercase : Dict = (cpu_peak_tracker.stop() - start_measures['''cpu''']) / 2**20 # GPU mem for i in range(torch.cuda.device_count() ): __lowercase : str = (torch.cuda.memory_allocated(__UpperCamelCase ) - start_measures[str(__UpperCamelCase )]) / 2**20 __lowercase : Optional[int] = (torch.cuda.max_memory_allocated(__UpperCamelCase ) - start_measures[str(__UpperCamelCase )]) / 2**20 return measures def __UpperCAmelCase ( __UpperCamelCase , 
__UpperCamelCase ): print(f"""{description}:""" ) print(f"""- Time: {measures["time"]:.2f}s""" ) for i in range(torch.cuda.device_count() ): print(f"""- GPU {i} allocated: {measures[str(__UpperCamelCase )]:.2f}MiB""" ) __lowercase : Dict = measures[f"""{i}-peak"""] print(f"""- GPU {i} peak: {peak:.2f}MiB""" ) print(f"""- CPU RAM allocated: {measures["cpu"]:.2f}MiB""" ) print(f"""- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB""" )
76
1
"""Rotate a square matrix by 90, 180 or 270 degrees counterclockwise."""
from __future__ import annotations

__all__ = [
    "make_matrix",
    "rotate_90",
    "rotate_180",
    "rotate_270",
    "transpose",
    "reverse_row",
    "reverse_column",
    "print_matrix",
]

# Fix: obfuscation gave every def the same name and left the helper calls
# (``transpose``, ``reverse_row``, ``reverse_column``, ``rotate_aa`` ...) and
# the parameter name ``matrix`` dangling; the conventional names used at the
# call sites are restored.


def make_matrix(row_size=4):
    """Build a ``row_size`` x ``row_size`` matrix filled with 1..n**2.

    Zero/negative sizes fall back to ``abs`` / the default of 4, preserving
    the original guard.
    """
    row_size = abs(row_size) or 4
    return [[1 + x + y * row_size for x in range(row_size)] for y in range(row_size)]


def rotate_90(matrix):
    """Rotate 90 degrees counterclockwise."""
    return reverse_row(transpose(matrix))
    # OR.. transpose(reverse_column(matrix))


def rotate_180(matrix):
    """Rotate 180 degrees."""
    return reverse_row(reverse_column(matrix))
    # OR.. reverse_column(reverse_row(matrix))


def rotate_270(matrix):
    """Rotate 270 degrees counterclockwise (i.e. 90 degrees clockwise)."""
    return reverse_column(transpose(matrix))
    # OR.. transpose(reverse_row(matrix))


def transpose(matrix):
    """Return the transpose: rows become columns."""
    return [list(row) for row in zip(*matrix)]


def reverse_row(matrix):
    """Return the matrix with its row order reversed."""
    return matrix[::-1]


def reverse_column(matrix):
    """Return the matrix with each row reversed."""
    return [row[::-1] for row in matrix]


def print_matrix(matrix):
    """Print the matrix one row per line."""
    for row in matrix:
        print(*row)


if __name__ == "__main__":
    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 90 counterclockwise:\n')
    print_matrix(rotate_90(matrix))

    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 180:\n')
    print_matrix(rotate_180(matrix))

    matrix = make_matrix()
    print('\norigin:\n')
    print_matrix(matrix)
    print('\nrotate 270 counterclockwise:\n')
    print_matrix(rotate_270(matrix))
76
"""simple docstring""" import numpy as np import datasets a_ = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n' a_ = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n' a_ = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): def _lowerCamelCase ( self ) -> List[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ), } ) , ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> Tuple: # convert to numpy arrays __lowercase : Dict = np.array(UpperCamelCase_ ) __lowercase : str = np.array(UpperCamelCase_ ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError('''Expected `X` to be 
a 2D vector''' ) if len(reference_distribution.shape ) != 2: raise ValueError('''Expected `reference_distribution` to be a 2D vector''' ) if reference_distribution.shape[0] < 2: raise ValueError( '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' ) # Get mahalanobis distance for each prediction __lowercase : Tuple = X - np.mean(UpperCamelCase_ ) __lowercase : List[Any] = np.cov(reference_distribution.T ) try: __lowercase : Tuple = np.linalg.inv(UpperCamelCase_ ) except np.linalg.LinAlgError: __lowercase : str = np.linalg.pinv(UpperCamelCase_ ) __lowercase : Any = np.dot(UpperCamelCase_ , UpperCamelCase_ ) __lowercase : Optional[Any] = np.dot(UpperCamelCase_ , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
76
1
"""simple docstring""" import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : List[str] = checkpoints.load_tax_checkpoint(__UpperCamelCase ) __lowercase : Any = flatten_dict(__UpperCamelCase ) return flax_params def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : int = {} __lowercase : Optional[int] = { '''token_embedder''': '''embeddings''', '''encoder_norm''': '''layernorm''', '''kernel''': '''weight''', '''.out''': '''.output''', '''scale''': '''weight''', '''embedders_0.pos_embedding''': '''row_embedder.weight''', '''embedders_1.pos_embedding''': '''column_embedder.weight''', } __lowercase : List[str] = { '''query''': '''attention.query''', '''key''': '''attention.key''', '''value''': '''attention.value''', '''output.dense''': '''output''', '''encoder_decoder_attention.o''': '''encoder_decoder_attention.attention.o''', '''pre_self_attention_layer_norm''': '''self_attention.layer_norm''', '''pre_cross_attention_layer_norm''': '''encoder_decoder_attention.layer_norm''', '''mlp.''': '''mlp.DenseReluDense.''', '''pre_mlp_layer_norm''': '''mlp.layer_norm''', '''self_attention.o''': '''self_attention.attention.o''', '''decoder.embeddings.embedding''': '''decoder.embed_tokens.weight''', '''decoder.relpos_bias.rel_embedding''': '''decoder.layer.0.self_attention.attention.relative_attention_bias.weight''', '''decoder.decoder_norm.weight''': '''decoder.final_layer_norm.weight''', '''decoder.logits_dense.weight''': '''decoder.lm_head.weight''', } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key __lowercase : Dict = '''.'''.join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): __lowercase : List[Any] 
= new_key.replace(__UpperCamelCase , __UpperCamelCase ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): __lowercase : str = new_key.replace(__UpperCamelCase , __UpperCamelCase ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number __lowercase : int = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , __UpperCamelCase ) __lowercase : List[Any] = new_key.replace('''encoder''' , '''encoder.encoder''' ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number __lowercase : int = re.sub(R'''layers_(\d+)''' , R'''layer.\1''' , __UpperCamelCase ) __lowercase : Optional[int] = flax_dict[key] __lowercase : Dict = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): __lowercase : Optional[int] = torch.from_numpy(converted_dict[key].T ) else: __lowercase : Dict = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False , __UpperCamelCase=False ): __lowercase : Optional[int] = get_flax_param(__UpperCamelCase ) if not use_large: __lowercase : int = PixaStructVisionConfig() __lowercase : str = PixaStructTextConfig() else: __lowercase : str = PixaStructVisionConfig( hidden_size=15_36 , d_ff=39_68 , num_attention_heads=24 , num_hidden_layers=18 ) __lowercase : Dict = PixaStructTextConfig(hidden_size=15_36 , d_ff=39_68 , num_heads=24 , num_layers=18 ) __lowercase : List[str] = PixaStructConfig( vision_config=encoder_config.to_dict() , text_config=decoder_config.to_dict() , is_vqa=__UpperCamelCase ) __lowercase : Dict = PixaStructForConditionalGeneration(__UpperCamelCase ) __lowercase : List[Any] = rename_and_convert_flax_params(__UpperCamelCase ) model.load_state_dict(__UpperCamelCase ) __lowercase : Union[str, Any] = AutoTokenizer.from_pretrained('''ybelkada/test-pix2struct-tokenizer''' ) __lowercase : int 
= PixaStructImageProcessor() __lowercase : str = PixaStructProcessor(image_processor=__UpperCamelCase , tokenizer=__UpperCamelCase ) if use_large: __lowercase : List[Any] = 40_96 __lowercase : Tuple = True # mkdir if needed os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) model.save_pretrained(__UpperCamelCase ) processor.save_pretrained(__UpperCamelCase ) print('''Model saved in {}'''.format(__UpperCamelCase ) ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument('--t5x_checkpoint_path', default=None, type=str, help='Path to the original T5x checkpoint.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--use_large', action='store_true', help='Use large model.') parser.add_argument('--is_vqa', action='store_true', help='Use large model.') a_ = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
76
"""simple docstring""" a_ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def __UpperCAmelCase ( __UpperCamelCase ): # Make sure the supplied data is a bytes-like object if not isinstance(__UpperCamelCase , __UpperCamelCase ): __lowercase : str = f"""a bytes-like object is required, not '{data.__class__.__name__}'""" raise TypeError(__UpperCamelCase ) __lowercase : Any = ''''''.join(bin(__UpperCamelCase )[2:].zfill(8 ) for byte in data ) __lowercase : List[str] = len(__UpperCamelCase ) % 6 != 0 if padding_needed: # The padding that will be added later __lowercase : int = B'''=''' * ((6 - len(__UpperCamelCase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(__UpperCamelCase ) % 6) else: __lowercase : Any = B'''''' # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(__UpperCamelCase ) , 6 ) ).encode() + padding ) def __UpperCAmelCase ( __UpperCamelCase ): # Make sure encoded_data is either a string or a bytes-like object if not isinstance(__UpperCamelCase , __UpperCamelCase ) and not isinstance(__UpperCamelCase , __UpperCamelCase ): __lowercase : List[str] = ( '''argument should be a bytes-like object or ASCII string, ''' f"""not '{encoded_data.__class__.__name__}'""" ) raise TypeError(__UpperCamelCase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(__UpperCamelCase , __UpperCamelCase ): try: __lowercase : List[str] = encoded_data.decode('''utf-8''' ) except UnicodeDecodeError: raise ValueError('''base64 encoded data should only contain ASCII characters''' ) __lowercase : Dict = encoded_data.count('''=''' ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in 
encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(__UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one __lowercase : Tuple = encoded_data[:-padding] __lowercase : str = ''''''.join( bin(B64_CHARSET.index(__UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: __lowercase : Any = ''''''.join( bin(B64_CHARSET.index(__UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data ) __lowercase : int = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(__UpperCamelCase ) , 8 ) ] return bytes(__UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
76
1
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def __UpperCAmelCase ( __UpperCamelCase ): # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): __lowercase : Any = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue __lowercase : Dict = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' ) __lowercase : Dict = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' ) __lowercase : Dict = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' ) __lowercase : Tuple = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' ) __lowercase : Dict = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' ) __lowercase : Optional[int] = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' ) __lowercase : Optional[int] = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' ) __lowercase : Union[str, Any] = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' ) __lowercase : str = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' ) __lowercase : Dict = key.replace('''image_encoder.module''' , '''flava.image_model''' ) __lowercase : str = key.replace('''text_encoder.module''' , '''flava.text_model''' ) __lowercase : Dict = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' ) __lowercase : Union[str, Any] = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' ) __lowercase : List[str] = key.replace('''text_projection''' , '''flava.text_projection''' 
) __lowercase : Any = key.replace('''image_projection''' , '''flava.image_projection''' ) __lowercase : Tuple = value.float() for key, value in codebook_state_dict.items(): __lowercase : int = value return upgrade @torch.no_grad() def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None ): if config_path is not None: __lowercase : Union[str, Any] = FlavaConfig.from_pretrained(__UpperCamelCase ) else: __lowercase : Union[str, Any] = FlavaConfig() __lowercase : Any = FlavaForPreTraining(__UpperCamelCase ).eval() __lowercase : Any = convert_dalle_checkpoint(__UpperCamelCase , __UpperCamelCase , save_checkpoint=__UpperCamelCase ) if os.path.exists(__UpperCamelCase ): __lowercase : Optional[Any] = torch.load(__UpperCamelCase , map_location='''cpu''' ) else: __lowercase : List[Any] = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location='''cpu''' ) __lowercase : Optional[int] = upgrade_state_dict(__UpperCamelCase , __UpperCamelCase ) hf_model.load_state_dict(__UpperCamelCase ) __lowercase : Union[str, Any] = hf_model.state_dict() __lowercase : Optional[Any] = count_parameters(__UpperCamelCase ) __lowercase : List[Any] = count_parameters(__UpperCamelCase ) + count_parameters(__UpperCamelCase ) assert torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) hf_model.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') a_ = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, 
args.config_path)
76
"""simple docstring""" import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', } a_ = { 'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'}, 'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'}, } a_ = { 'ctrl': 2_5_6, } a_ = { 'Pregnancy': 1_6_8_6_2_9, 'Christianity': 7_6_7_5, 'Explain': 1_0_6_4_2_3, 'Fitness': 6_3_4_4_0, 'Saving': 6_3_1_6_3, 'Ask': 2_7_1_7_1, 'Ass': 9_5_9_8_5, 'Joke': 1_6_3_5_0_9, 'Questions': 4_5_6_2_2, 'Thoughts': 4_9_6_0_5, 'Retail': 5_2_3_4_2, 'Feminism': 1_6_4_3_3_8, 'Writing': 1_1_9_9_2, 'Atheism': 1_9_2_2_6_3, 'Netflix': 4_8_6_1_6, 'Computing': 3_9_6_3_9, 'Opinion': 4_3_2_1_3, 'Alone': 4_4_9_6_7, 'Funny': 5_8_9_1_7, 'Gaming': 4_0_3_5_8, 'Human': 4_0_8_8, 'India': 1_3_3_1, 'Joker': 7_7_1_3_8, 'Diet': 3_6_2_0_6, 'Legal': 1_1_8_5_9, 'Norman': 4_9_3_9, 'Tip': 7_2_6_8_9, 'Weight': 5_2_3_4_3, 'Movies': 4_6_2_7_3, 'Running': 2_3_4_2_5, 'Science': 2_0_9_0, 'Horror': 3_7_7_9_3, 'Confession': 6_0_5_7_2, 'Finance': 1_2_2_5_0, 'Politics': 1_6_3_6_0, 'Scary': 1_9_1_9_8_5, 'Support': 1_2_6_5_4, 'Technologies': 3_2_5_1_6, 'Teenage': 6_6_1_6_0, 'Event': 3_2_7_6_9, 'Learned': 6_7_4_6_0, 'Notion': 1_8_2_7_7_0, 'Wikipedia': 3_7_5_8_3, 'Books': 6_6_6_5, 'Extract': 7_6_0_5_0, 'Confessions': 1_0_2_7_0_1, 'Conspiracy': 7_5_9_3_2, 'Links': 6_3_6_7_4, 'Narcissus': 1_5_0_4_2_5, 'Relationship': 5_4_7_6_6, 'Relationships': 1_3_4_7_9_6, 'Reviews': 4_1_6_7_1, 'News': 4_2_5_6, 'Translation': 2_6_8_2_0, 'multilingual': 1_2_8_4_0_6, } def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Any = set() __lowercase : Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowercase : Any = char __lowercase : List[Any] = set(__UpperCamelCase ) return pairs class 
UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =CONTROL_CODES def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="<unk>" , **UpperCamelCase_ ) -> int: super().__init__(unk_token=UpperCamelCase_ , **UpperCamelCase_ ) with open(UpperCamelCase_ , encoding='''utf-8''' ) as vocab_handle: __lowercase : List[Any] = json.load(UpperCamelCase_ ) __lowercase : Any = {v: k for k, v in self.encoder.items()} with open(UpperCamelCase_ , encoding='''utf-8''' ) as merges_handle: __lowercase : Optional[Any] = merges_handle.read().split('''\n''' )[1:-1] __lowercase : Optional[Any] = [tuple(merge.split() ) for merge in merges] __lowercase : Optional[int] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) __lowercase : Optional[Any] = {} @property def _lowerCamelCase ( self ) -> Union[str, Any]: return len(self.encoder ) def _lowerCamelCase ( self ) -> Tuple: return dict(self.encoder , **self.added_tokens_encoder ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> str: if token in self.cache: return self.cache[token] __lowercase : str = tuple(UpperCamelCase_ ) __lowercase : str = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) __lowercase : Optional[Any] = get_pairs(UpperCamelCase_ ) if not pairs: return token while True: __lowercase : Dict = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __lowercase ,__lowercase : Tuple = bigram __lowercase : int = [] __lowercase : Union[str, Any] = 0 while i < len(UpperCamelCase_ ): try: __lowercase : Optional[int] = word.index(UpperCamelCase_ , UpperCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __lowercase : Tuple = j if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i 
+= 2 else: new_word.append(word[i] ) i += 1 __lowercase : List[str] = tuple(UpperCamelCase_ ) __lowercase : str = new_word if len(UpperCamelCase_ ) == 1: break else: __lowercase : List[str] = get_pairs(UpperCamelCase_ ) __lowercase : Optional[Any] = '''@@ '''.join(UpperCamelCase_ ) __lowercase : Dict = word[:-4] __lowercase : str = word return word def _lowerCamelCase ( self , UpperCamelCase_ ) -> str: __lowercase : List[Any] = [] __lowercase : int = re.findall(R'''\S+\n?''' , UpperCamelCase_ ) for token in words: split_tokens.extend(list(self.bpe(UpperCamelCase_ ).split(''' ''' ) ) ) return split_tokens def _lowerCamelCase ( self , UpperCamelCase_ ) -> Optional[Any]: return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> int: return self.decoder.get(UpperCamelCase_ , self.unk_token ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> Optional[int]: __lowercase : Tuple = ''' '''.join(UpperCamelCase_ ).replace('''@@ ''' , '''''' ).strip() return out_string def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> Tuple[str]: if not os.path.isdir(UpperCamelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowercase : Optional[Any] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __lowercase : Optional[int] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + '''\n''' ) __lowercase : List[str] = 0 with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ): 
if index != token_index: logger.warning( F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''' ) __lowercase : Union[str, Any] = token_index writer.write(''' '''.join(UpperCamelCase_ ) + '''\n''' ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
76
1
"""simple docstring""" import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger a_ = get_logger(__name__) a_ = Path(__file__).parent / 'model_card_template.md' a_ = uuida().hex a_ = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES a_ = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES a_ = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/' def __UpperCAmelCase ( __UpperCamelCase = None ): __lowercase : Optional[int] = f"""diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}""" if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f"""; torch/{_torch_version}""" if is_flax_available(): ua += f"""; jax/{_jax_version}""" ua += f"""; flax/{_flax_version}""" if is_onnx_available(): ua += f"""; onnxruntime/{_onnxruntime_version}""" # CI will set this value to True if os.environ.get('''DIFFUSERS_IS_CI''' , '''''' ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(__UpperCamelCase , __UpperCamelCase ): ua += "; " + "; ".join(f"""{k}/{v}""" for k, v in user_agent.items() ) elif isinstance(__UpperCamelCase , __UpperCamelCase ): ua += "; " + user_agent 
return ua def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = None ): if token is None: __lowercase : List[Any] = HfFolder.get_token() if organization is None: __lowercase : Union[str, Any] = whoami(__UpperCamelCase )['''name'''] return f"""{username}/{model_id}""" else: return f"""{organization}/{model_id}""" def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if not is_jinja_available(): raise ValueError( '''Modelcard rendering is based on Jinja templates.''' ''' Please make sure to have `jinja` installed before using `create_model_card`.''' ''' To install it, please run `pip install Jinja2`.''' ) if hasattr(__UpperCamelCase , '''local_rank''' ) and args.local_rank not in [-1, 0]: return __lowercase : Optional[int] = args.hub_token if hasattr(__UpperCamelCase , '''hub_token''' ) else None __lowercase : Tuple = get_full_repo_name(__UpperCamelCase , token=__UpperCamelCase ) __lowercase : int = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language='''en''' , license='''apache-2.0''' , library_name='''diffusers''' , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=__UpperCamelCase , model_name=__UpperCamelCase , repo_name=__UpperCamelCase , dataset_name=args.dataset_name if hasattr(__UpperCamelCase , '''dataset_name''' ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(__UpperCamelCase , '''gradient_accumulation_steps''' ) else None ) , adam_betaa=args.adam_betaa if hasattr(__UpperCamelCase , '''adam_beta1''' ) else None , adam_betaa=args.adam_betaa if hasattr(__UpperCamelCase , '''adam_beta2''' ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(__UpperCamelCase , '''adam_weight_decay''' ) else None , adam_epsilon=args.adam_epsilon if hasattr(__UpperCamelCase , 
'''adam_epsilon''' ) else None , lr_scheduler=args.lr_scheduler if hasattr(__UpperCamelCase , '''lr_scheduler''' ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(__UpperCamelCase , '''lr_warmup_steps''' ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(__UpperCamelCase , '''ema_inv_gamma''' ) else None , ema_power=args.ema_power if hasattr(__UpperCamelCase , '''ema_power''' ) else None , ema_max_decay=args.ema_max_decay if hasattr(__UpperCamelCase , '''ema_max_decay''' ) else None , mixed_precision=args.mixed_precision , ) __lowercase : Optional[Any] = os.path.join(args.output_dir , '''README.md''' ) model_card.save(__UpperCamelCase ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase = None ): if resolved_file is None or commit_hash is not None: return commit_hash __lowercase : Tuple = str(Path(__UpperCamelCase ).as_posix() ) __lowercase : Any = re.search(R'''snapshots/([^/]+)/''' , __UpperCamelCase ) if search is None: return None __lowercase : Optional[Any] = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(__UpperCamelCase ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. 
# Hub utilities (part 2): one-time cache migration from the old HF cache
# layout, plus `_add_variant` and `_get_model_file` helpers.
# NOTE(review): same obfuscation caveats as above — constants bound to `a_`
# but read under their original names (`hf_cache_home`, `cache_version_file`,
# `cache_version`, ...), garbled `__lowercase` assignment targets, and
# duplicate parameter names (SyntaxError as written).
a_ = os.path.expanduser(
    os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface'))
)
a_ = os.path.join(hf_cache_home, 'diffusers')


# Move blob files from the old diffusers cache into the new one, leaving
# symlinks behind so the old layout keeps working.
def __UpperCAmelCase ( __UpperCamelCase = None , __UpperCamelCase = None ):
    if new_cache_dir is None:
        __lowercase : Tuple = DIFFUSERS_CACHE
    if old_cache_dir is None:
        __lowercase : Tuple = old_diffusers_cache

    __lowercase : Union[str, Any] = Path(__UpperCamelCase ).expanduser()
    __lowercase : Optional[Any] = Path(__UpperCamelCase ).expanduser()
    for old_blob_path in old_cache_dir.glob('''**/blobs/*''' ):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            __lowercase : List[str] = new_cache_dir / old_blob_path.relative_to(__UpperCamelCase )
            new_blob_path.parent.mkdir(parents=__UpperCamelCase , exist_ok=__UpperCamelCase )
            os.replace(__UpperCamelCase , __UpperCamelCase )
            try:
                os.symlink(__UpperCamelCase , __UpperCamelCase )
            except OSError:
                # Symlinks may be unavailable (e.g. some Windows setups); the
                # migration still succeeds, old caches just re-download.
                logger.warning(
                    '''Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.''' )
    # At this point, old_cache_dir contains symlinks to the new cache (it can still be used).


# Module-level, import-time cache-version check: migrate once, then stamp the
# cache with version "1".
a_ = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt')
if not os.path.isfile(cache_version_file):
    a_ = 0
else:
    with open(cache_version_file) as f:
        try:
            a_ = int(f.read())
        except ValueError:
            a_ = 0

if cache_version < 1:
    a_ = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
    if old_cache_is_not_empty:
        logger.warning(
            'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
            'existing cached models. This is a one-time operation, you can interrupt it or run it '
            'later by calling `diffusers.utils.hub_utils.move_cache()`.'
        )
        try:
            move_cache()
        except Exception as e:
            a_ = '\n'.join(traceback.format_tb(e.__traceback__))
            logger.error(
                F"There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease "
                'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
                'message and we will do our best to help.'
            )

if cache_version < 1:
    try:
        os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
        with open(cache_version_file, 'w') as f:
            f.write('1')
    except Exception:
        logger.warning(
            F"There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure "
            'the directory exists and can be written to.'
        )


# Insert `variant` before the file extension of `weights_name`
# (e.g. "model.bin" + "fp16" -> "model.fp16.bin").
def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase = None ):
    if variant is not None:
        __lowercase : Optional[int] = weights_name.split('''.''' )
        __lowercase : List[Any] = splits[:-1] + [variant] + splits[-1:]
        __lowercase : str = '''.'''.join(__UpperCamelCase )

    return weights_name


# Resolve a weights file: local file, local directory (with optional
# subfolder), or hub download — including the deprecated "variant via
# revision" fallback — and translate hub errors into EnvironmentError.
def __UpperCAmelCase ( __UpperCamelCase , *, __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , ):
    __lowercase : Optional[Any] = str(__UpperCamelCase )
    if os.path.isfile(__UpperCamelCase ):
        # Already a concrete local file path.
        return pretrained_model_name_or_path
    elif os.path.isdir(__UpperCamelCase ):
        if os.path.isfile(os.path.join(__UpperCamelCase , __UpperCamelCase ) ):
            # Load from a PyTorch checkpoint
            __lowercase : Any = os.path.join(__UpperCamelCase , __UpperCamelCase )
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) ):
            __lowercase : int = os.path.join(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
            return model_file
        else:
            raise EnvironmentError(
                f"""Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.""" )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__UpperCamelCase ).base_version ) >= version.parse('''0.20.0''' )
        ):
            try:
                __lowercase : int = hf_hub_download(
                    __UpperCamelCase , filename=_add_variant(__UpperCamelCase , __UpperCamelCase ) , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , proxies=__UpperCamelCase , resume_download=__UpperCamelCase , local_files_only=__UpperCamelCase , use_auth_token=__UpperCamelCase , user_agent=__UpperCamelCase , subfolder=__UpperCamelCase , revision=revision or commit_hash , )
                warnings.warn(
                    f"""Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.""" , __UpperCamelCase , )
                return model_file
            except:  # noqa: E722
                # Variant file missing on 'main': warn loudly and fall through
                # to the normal download path below.
                warnings.warn(
                    f"""You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(__UpperCamelCase , __UpperCamelCase )} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(__UpperCamelCase , __UpperCamelCase )}' so that the correct variant file can be added.""" , __UpperCamelCase , )
        try:
            # 2. Load model file as usual
            __lowercase : Tuple = hf_hub_download(
                __UpperCamelCase , filename=__UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , proxies=__UpperCamelCase , resume_download=__UpperCamelCase , local_files_only=__UpperCamelCase , use_auth_token=__UpperCamelCase , user_agent=__UpperCamelCase , subfolder=__UpperCamelCase , revision=revision or commit_hash , )
            return model_file

        except RepositoryNotFoundError:
            raise EnvironmentError(
                f"""{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier """
                '''listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a '''
                '''token having permission to this repo with `use_auth_token` or log in with `huggingface-cli '''
                '''login`.''' )
        except RevisionNotFoundError:
            raise EnvironmentError(
                f"""{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for """
                '''this model name. Check the model page at '''
                f"""'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.""" )
        except EntryNotFoundError:
            raise EnvironmentError(
                f"""{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.""" )
        except HTTPError as err:
            raise EnvironmentError(
                f"""There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}""" )
        except ValueError:
            raise EnvironmentError(
                f"""We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it"""
                f""" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a"""
                f""" directory containing a file named {weights_name} or"""
                ''' \nCheckout your internet connection or see how to run the library in'''
                ''' offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.''' )
        except EnvironmentError:
            raise EnvironmentError(
                f"""Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from """
                '''\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. '''
                f"""Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory """
                f"""containing a file named {weights_name}""" )
76
"""simple docstring""" import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor a_ = logging.get_logger(__name__) class UpperCAmelCase_ ( snake_case ): def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> None: warnings.warn( '''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use LayoutLMv2ImageProcessor instead.''' , UpperCamelCase_ , ) super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
76
1
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if digit_amount > 0: return round(number - int(__UpperCamelCase ) , __UpperCamelCase ) return number - int(__UpperCamelCase ) if __name__ == "__main__": print(decimal_isolate(1.53, 0)) print(decimal_isolate(35.345, 1)) print(decimal_isolate(35.345, 2)) print(decimal_isolate(35.345, 3)) print(decimal_isolate(-14.789, 3)) print(decimal_isolate(0, 2)) print(decimal_isolate(-14.123, 1)) print(decimal_isolate(-14.123, 2)) print(decimal_isolate(-14.123, 3))
76
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging a_ = logging.get_logger(__name__) a_ = '▁' a_ = {'vocab_file': 'sentencepiece.bpe.model'} a_ = { 'vocab_file': { 'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model', 'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model', 'xlm-roberta-large-finetuned-conll02-dutch': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll02-spanish': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll03-english': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll03-german': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model' ), } } a_ = { 'xlm-roberta-base': 5_1_2, 'xlm-roberta-large': 5_1_2, 'xlm-roberta-large-finetuned-conll02-dutch': 5_1_2, 'xlm-roberta-large-finetuned-conll02-spanish': 5_1_2, 'xlm-roberta-large-finetuned-conll03-english': 5_1_2, 'xlm-roberta-large-finetuned-conll03-german': 5_1_2, } class UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =["input_ids", "attention_mask"] def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> None: # Mask token behave like a normal word, i.e. 
include the space before it __lowercase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token __lowercase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) __lowercase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCamelCase_ ) ) __lowercase : str = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __lowercase : List[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __lowercase : Tuple = 1 __lowercase : Any = len(self.sp_model ) + self.fairseq_offset __lowercase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Optional[Any]: __lowercase : int = self.__dict__.copy() __lowercase : int = None __lowercase : Optional[Any] = self.sp_model.serialized_model_proto() return state def __setstate__( self , UpperCamelCase_ ) -> Tuple: __lowercase : List[str] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __lowercase : str = {} __lowercase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowercase : Dict = [self.cls_token_id] __lowercase : Union[str, Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> List[int]: __lowercase : Optional[Any] = [self.sep_token_id] __lowercase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return 
len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowerCamelCase ( self ) -> Dict: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def _lowerCamelCase ( self ) -> str: __lowercase : List[str] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[str]: return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> str: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __lowercase : Optional[Any] = self.sp_model.PieceToId(UpperCamelCase_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _lowerCamelCase ( self , UpperCamelCase_ ) -> Tuple: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> Dict: __lowercase : Tuple = ''''''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ''' ''' ).strip() return out_string def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> Tuple[str]: if not os.path.isdir(UpperCamelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowercase : List[Any] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase_ , '''wb''' ) as fi: __lowercase : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_ ) return (out_vocab_file,)
76
1
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput @dataclass class UpperCAmelCase_ ( snake_case ): UpperCamelCase =None UpperCamelCase =None UpperCamelCase =None UpperCamelCase =None class UpperCAmelCase_ ( snake_case ): def __init__( self , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_=5_12 , UpperCamelCase_="cls" , UpperCamelCase_=False , UpperCamelCase_=True , **UpperCamelCase_ , ) -> List[Any]: super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) __lowercase : Optional[Any] = project_dim __lowercase : Union[str, Any] = pooler_fn __lowercase : List[Any] = learn_encoder __lowercase : Union[str, Any] = use_attention_mask class UpperCAmelCase_ ( snake_case ): UpperCamelCase =[r"pooler", r"logit_scale"] UpperCamelCase =[r"position_ids", r"predictions.decoder.bias"] UpperCamelCase ="roberta" UpperCamelCase =RobertaSeriesConfig def __init__( self , UpperCamelCase_ ) -> Dict: super().__init__(UpperCamelCase_ ) __lowercase : Optional[int] = XLMRobertaModel(UpperCamelCase_ ) __lowercase : Union[str, Any] = nn.Linear(config.hidden_size , config.project_dim ) __lowercase : str = getattr(UpperCamelCase_ , '''has_pre_transformation''' , UpperCamelCase_ ) if self.has_pre_transformation: __lowercase : int = nn.Linear(config.hidden_size , config.project_dim ) __lowercase : Optional[Any] = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps ) self.post_init() def _lowerCamelCase ( self , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , ) -> 
Tuple: __lowercase : Optional[Any] = return_dict if return_dict is not None else self.config.use_return_dict __lowercase : int = self.base_model( input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , output_attentions=UpperCamelCase_ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=UpperCamelCase_ , ) if self.has_pre_transformation: __lowercase : Any = outputs['''hidden_states'''][-2] __lowercase : Dict = self.pre_LN(UpperCamelCase_ ) __lowercase : Union[str, Any] = self.transformation_pre(UpperCamelCase_ ) return TransformationModelOutput( projection_state=UpperCamelCase_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) else: __lowercase : List[Any] = self.transformation(outputs.last_hidden_state ) return TransformationModelOutput( projection_state=UpperCamelCase_ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
76
"""simple docstring""" import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput a_ = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class UpperCAmelCase_ ( snake_case ): def __init__( self , *UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ ) -> Tuple: super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) __lowercase : Union[str, Any] = eval_examples __lowercase : Union[str, Any] = post_process_function __lowercase : Any = quant_trainer_args __lowercase : Optional[Any] = 1_28 # default number of calibration samples def _lowerCamelCase ( self , UpperCamelCase_=None ) -> Any: if calib_dataset is None and self.calib_dataset is None: raise ValueError('''Trainer: calibration requires an calib_dataset.''' ) __lowercase : Tuple = calib_dataset if calib_dataset is not None else self.calib_dataset __lowercase : str = self._remove_unused_columns(UpperCamelCase_ , description='''Calibration''' ) return DataLoader( UpperCamelCase_ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=UpperCamelCase_ , ) def _lowerCamelCase ( self , UpperCamelCase_=None ) -> Any: __lowercase : Optional[int] = self.train_dataset if calib_dataset is None else calib_dataset __lowercase : List[Any] = self.get_calib_dataloader(UpperCamelCase_ ) __lowercase : Dict = self.model quant_trainer.configure_model(UpperCamelCase_ , self.quant_trainer_args , calib=UpperCamelCase_ ) model.eval() quant_trainer.enable_calibration(UpperCamelCase_ ) logger.info('''***** Running calibration *****''' ) logger.info(F""" Num examples = 
{self.calib_num}""" ) logger.info(F""" Batch size = {calib_dataloader.batch_size}""" ) for step, inputs in enumerate(UpperCamelCase_ ): # Prediction step __lowercase ,__lowercase ,__lowercase : Optional[Any] = self.prediction_step(UpperCamelCase_ , UpperCamelCase_ , prediction_loss_only=UpperCamelCase_ ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(UpperCamelCase_ , self.quant_trainer_args ) __lowercase : Tuple = model def _lowerCamelCase ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_ = "eval" ) -> str: __lowercase : Tuple = self.eval_dataset if eval_dataset is None else eval_dataset __lowercase : Union[str, Any] = self.get_eval_dataloader(UpperCamelCase_ ) __lowercase : str = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. __lowercase : Optional[int] = self.compute_metrics __lowercase : Dict = None __lowercase : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __lowercase : Tuple = eval_loop( UpperCamelCase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , ) finally: __lowercase : List[str] = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: __lowercase : int = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , output.predictions ) __lowercase : Optional[int] = self.compute_metrics(UpperCamelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): __lowercase : List[str] = metrics.pop(UpperCamelCase_ ) self.log(UpperCamelCase_ ) else: __lowercase : Dict = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report() ) __lowercase : int = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase_ ) return metrics def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_ = "test" ) -> List[Any]: __lowercase : Optional[int] = self.get_test_dataloader(UpperCamelCase_ ) # Temporarily disable metric computation, we will do it in the loop here. __lowercase : str = self.compute_metrics __lowercase : Dict = None __lowercase : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __lowercase : Union[str, Any] = eval_loop( UpperCamelCase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , ) finally: __lowercase : Any = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output __lowercase : Dict = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , output.predictions , '''predict''' ) __lowercase : Optional[int] = self.compute_metrics(UpperCamelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): __lowercase : List[str] = metrics.pop(UpperCamelCase_ ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_="./" ) -> int: __lowercase : Optional[int] = self.eval_dataset __lowercase : Optional[int] = self.get_eval_dataloader(UpperCamelCase_ ) __lowercase : Any = next(iter(UpperCamelCase_ ) ) # saving device - to make it consistent __lowercase : Any = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) # convert to tuple __lowercase : Tuple = tuple(v.to(UpperCamelCase_ ) for k, v in batch.items() ) logger.info('''Converting model to be onnx compatible''' ) from pytorch_quantization.nn 
import TensorQuantizer __lowercase : List[Any] = True __lowercase : int = self.model.to(UpperCamelCase_ ) model.eval() model.float() __lowercase : Optional[int] = model.module if hasattr(UpperCamelCase_ , '''module''' ) else model quant_trainer.configure_model(UpperCamelCase_ , self.quant_trainer_args ) __lowercase : Tuple = os.path.join(UpperCamelCase_ , '''model.onnx''' ) logger.info(F"""exporting model to {output_model_file}""" ) __lowercase : Tuple = {0: '''batch_size''', 1: '''seq_len'''} torch.onnx.export( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , export_params=UpperCamelCase_ , opset_version=13 , do_constant_folding=UpperCamelCase_ , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={ '''input_ids''': axes, '''attention_mask''': axes, '''token_type_ids''': axes, '''output_start_logits''': axes, '''output_end_logits''': axes, } , verbose=UpperCamelCase_ , ) logger.info('''onnx export finished''' )
76
1
"""simple docstring""" import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class UpperCAmelCase_ ( snake_case ): def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Any: super().__init__() __lowercase : Optional[Any] = value_function __lowercase : Tuple = unet __lowercase : Any = scheduler __lowercase : List[str] = env __lowercase : Dict = env.get_dataset() __lowercase : Dict = {} for key in self.data.keys(): try: __lowercase : List[str] = self.data[key].mean() except: # noqa: E722 pass __lowercase : List[str] = {} for key in self.data.keys(): try: __lowercase : str = self.data[key].std() except: # noqa: E722 pass __lowercase : Any = env.observation_space.shape[0] __lowercase : List[Any] = env.action_space.shape[0] def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> Any: return (x_in - self.means[key]) / self.stds[key] def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> Tuple: return x_in * self.stds[key] + self.means[key] def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[Any]: if type(UpperCamelCase_ ) is dict: return {k: self.to_torch(UpperCamelCase_ ) for k, v in x_in.items()} elif torch.is_tensor(UpperCamelCase_ ): return x_in.to(self.unet.device ) return torch.tensor(UpperCamelCase_ , device=self.unet.device ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> Union[str, Any]: for key, val in cond.items(): __lowercase : Optional[int] = val.clone() return x_in def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> str: __lowercase : List[str] = x.shape[0] __lowercase : int = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model __lowercase : Tuple = torch.full((batch_size,) , UpperCamelCase_ , 
device=self.unet.device , dtype=torch.long ) for _ in range(UpperCamelCase_ ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models __lowercase : Optional[int] = self.value_function(x.permute(0 , 2 , 1 ) , UpperCamelCase_ ).sample __lowercase : List[Any] = torch.autograd.grad([y.sum()] , [x] )[0] __lowercase : Dict = self.scheduler._get_variance(UpperCamelCase_ ) __lowercase : str = torch.exp(0.5 * posterior_variance ) __lowercase : Tuple = model_std * grad __lowercase : Dict = 0 __lowercase : Optional[int] = x.detach() __lowercase : Optional[Any] = x + scale * grad __lowercase : List[str] = self.reset_xa(UpperCamelCase_ , UpperCamelCase_ , self.action_dim ) __lowercase : List[Any] = self.unet(x.permute(0 , 2 , 1 ) , UpperCamelCase_ ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg __lowercase : Tuple = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , predict_epsilon=UpperCamelCase_ )['''prev_sample'''] # apply conditions to the trajectory (set the initial state) __lowercase : Any = self.reset_xa(UpperCamelCase_ , UpperCamelCase_ , self.action_dim ) __lowercase : Dict = self.to_torch(UpperCamelCase_ ) return x, y def __call__( self , UpperCamelCase_ , UpperCamelCase_=64 , UpperCamelCase_=32 , UpperCamelCase_=2 , UpperCamelCase_=0.1 ) -> List[Any]: # normalize the observations and create batch dimension __lowercase : Optional[Any] = self.normalize(UpperCamelCase_ , '''observations''' ) __lowercase : Any = obs[None].repeat(UpperCamelCase_ , axis=0 ) __lowercase : Optional[int] = {0: self.to_torch(UpperCamelCase_ )} __lowercase : Optional[int] = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) __lowercase : int = randn_tensor(UpperCamelCase_ , device=self.unet.device ) __lowercase : int = self.reset_xa(UpperCamelCase_ , UpperCamelCase_ , self.action_dim ) __lowercase 
: List[Any] = self.to_torch(UpperCamelCase_ ) # run the diffusion process __lowercase ,__lowercase : str = self.run_diffusion(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # sort output trajectories by value __lowercase : Optional[Any] = y.argsort(0 , descending=UpperCamelCase_ ).squeeze() __lowercase : List[Any] = x[sorted_idx] __lowercase : Union[str, Any] = sorted_values[:, :, : self.action_dim] __lowercase : List[str] = actions.detach().cpu().numpy() __lowercase : Optional[Any] = self.de_normalize(UpperCamelCase_ , key='''actions''' ) # select the action with the highest value if y is not None: __lowercase : str = 0 else: # if we didn't run value guiding, select a random action __lowercase : Optional[int] = np.random.randint(0 , UpperCamelCase_ ) __lowercase : Optional[Any] = denorm_actions[selected_index, 0] return denorm_actions
76
"""simple docstring""" import math import flax.linen as nn import jax.numpy as jnp def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 , __UpperCamelCase = 1 , __UpperCamelCase = 1.0e4 , __UpperCamelCase = False , __UpperCamelCase = 1.0 , ): assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, f"""Embedding dimension {embedding_dim} should be even""" __lowercase : Dict = float(embedding_dim // 2 ) __lowercase : Tuple = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) __lowercase : List[Any] = min_timescale * jnp.exp(jnp.arange(__UpperCamelCase , dtype=jnp.floataa ) * -log_timescale_increment ) __lowercase : Any = jnp.expand_dims(__UpperCamelCase , 1 ) * jnp.expand_dims(__UpperCamelCase , 0 ) # scale embeddings __lowercase : Optional[int] = scale * emb if flip_sin_to_cos: __lowercase : Any = jnp.concatenate([jnp.cos(__UpperCamelCase ), jnp.sin(__UpperCamelCase )] , axis=1 ) else: __lowercase : List[str] = jnp.concatenate([jnp.sin(__UpperCamelCase ), jnp.cos(__UpperCamelCase )] , axis=1 ) __lowercase : int = jnp.reshape(__UpperCamelCase , [jnp.shape(__UpperCamelCase )[0], embedding_dim] ) return signal class UpperCAmelCase_ ( nn.Module ): UpperCamelCase =32 UpperCamelCase =jnp.floataa @nn.compact def __call__( self , UpperCamelCase_ ) -> Optional[int]: __lowercase : Union[str, Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(UpperCamelCase_ ) __lowercase : str = nn.silu(UpperCamelCase_ ) __lowercase : Dict = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(UpperCamelCase_ ) return temb class UpperCAmelCase_ ( nn.Module ): UpperCamelCase =32 UpperCamelCase =False UpperCamelCase =1 @nn.compact def __call__( self , UpperCamelCase_ ) -> Optional[int]: return get_sinusoidal_embeddings( UpperCamelCase_ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
76
1
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase ): if not isinstance(__UpperCamelCase , __UpperCamelCase ) or number < 0: raise ValueError('''Input must be a non-negative integer''' ) __lowercase : Optional[int] = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
76
"""simple docstring""" import os import sys a_ = os.path.join(os.path.dirname(__file__), 'src') sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) a_ = [ 'torch', 'numpy', 'tokenizers', 'filelock', 'requests', 'tqdm', 'regex', 'sentencepiece', 'sacremoses', 'importlib_metadata', 'huggingface_hub', ] @add_start_docstrings(AutoConfig.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoConfig.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoTokenizer.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoTokenizer.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoModel.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoModel.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoModelForCausalLM.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoModelForMaskedLM.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoModelForSequenceClassification.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoModelForQuestionAnswering.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
76
1
"""simple docstring""" from ...processing_utils import ProcessorMixin class UpperCAmelCase_ ( snake_case ): UpperCamelCase ="WhisperFeatureExtractor" UpperCamelCase ="WhisperTokenizer" def __init__( self , UpperCamelCase_ , UpperCamelCase_ ) -> Dict: super().__init__(UpperCamelCase_ , UpperCamelCase_ ) __lowercase : Union[str, Any] = self.feature_extractor __lowercase : Tuple = False def _lowerCamelCase ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=True ) -> Any: return self.tokenizer.get_decoder_prompt_ids(task=UpperCamelCase_ , language=UpperCamelCase_ , no_timestamps=UpperCamelCase_ ) def __call__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[int]: # For backward compatibility if self._in_target_context_manager: return self.current_processor(*UpperCamelCase_ , **UpperCamelCase_ ) __lowercase : Optional[int] = kwargs.pop('''audio''' , UpperCamelCase_ ) __lowercase : Optional[Any] = kwargs.pop('''sampling_rate''' , UpperCamelCase_ ) __lowercase : str = kwargs.pop('''text''' , UpperCamelCase_ ) if len(UpperCamelCase_ ) > 0: __lowercase : Union[str, Any] = args[0] __lowercase : List[str] = args[1:] if audio is None and text is None: raise ValueError('''You need to specify either an `audio` or `text` input to process.''' ) if audio is not None: __lowercase : Tuple = self.feature_extractor(UpperCamelCase_ , *UpperCamelCase_ , sampling_rate=UpperCamelCase_ , **UpperCamelCase_ ) if text is not None: __lowercase : List[Any] = self.tokenizer(UpperCamelCase_ , **UpperCamelCase_ ) if text is None: return inputs elif audio is None: return encodings else: __lowercase : int = encodings['''input_ids'''] return inputs def _lowerCamelCase ( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[Any]: return self.tokenizer.batch_decode(*UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[Any]: return self.tokenizer.decode(*UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( 
self , UpperCamelCase_ , UpperCamelCase_="np" ) -> int: return self.tokenizer.get_prompt_ids(UpperCamelCase_ , return_tensors=UpperCamelCase_ )
76
"""simple docstring""" from math import pi, sqrt, tan def __UpperCAmelCase ( __UpperCamelCase ): if side_length < 0: raise ValueError('''surface_area_cube() only accepts non-negative values''' ) return 6 * side_length**2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if length < 0 or breadth < 0 or height < 0: raise ValueError('''surface_area_cuboid() only accepts non-negative values''' ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def __UpperCAmelCase ( __UpperCamelCase ): if radius < 0: raise ValueError('''surface_area_sphere() only accepts non-negative values''' ) return 4 * pi * radius**2 def __UpperCAmelCase ( __UpperCamelCase ): if radius < 0: raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' ) return 3 * pi * radius**2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if radius < 0 or height < 0: raise ValueError('''surface_area_cone() only accepts non-negative values''' ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( '''surface_area_conical_frustum() only accepts non-negative values''' ) __lowercase : List[str] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if radius < 0 or height < 0: raise ValueError('''surface_area_cylinder() only accepts non-negative values''' ) return 2 * pi * radius * (height + radius) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if torus_radius < 0 or tube_radius < 0: raise ValueError('''surface_area_torus() only accepts non-negative values''' ) if torus_radius < tube_radius: raise ValueError( '''surface_area_torus() does not support spindle or self intersecting tori''' ) return 4 * pow(__UpperCamelCase , 2 ) 
* torus_radius * tube_radius def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if length < 0 or width < 0: raise ValueError('''area_rectangle() only accepts non-negative values''' ) return length * width def __UpperCAmelCase ( __UpperCamelCase ): if side_length < 0: raise ValueError('''area_square() only accepts non-negative values''' ) return side_length**2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if base < 0 or height < 0: raise ValueError('''area_triangle() only accepts non-negative values''' ) return (base * height) / 2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError('''Given three sides do not form a triangle''' ) __lowercase : int = (sidea + sidea + sidea) / 2 __lowercase : List[Any] = sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if base < 0 or height < 0: raise ValueError('''area_parallelogram() only accepts non-negative values''' ) return base * height def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if basea < 0 or basea < 0 or height < 0: raise ValueError('''area_trapezium() only accepts non-negative values''' ) return 1 / 2 * (basea + basea) * height def __UpperCAmelCase ( __UpperCamelCase ): if radius < 0: raise ValueError('''area_circle() only accepts non-negative values''' ) return pi * radius**2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if radius_x < 0 or radius_y < 0: raise ValueError('''area_ellipse() only accepts non-negative values''' ) return pi * radius_x * radius_y def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if diagonal_a < 0 or diagonal_a < 0: raise 
ValueError('''area_rhombus() only accepts non-negative values''' ) return 1 / 2 * diagonal_a * diagonal_a def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if not isinstance(__UpperCamelCase , __UpperCamelCase ) or sides < 3: raise ValueError( '''area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides''' ) elif length < 0: raise ValueError( '''area_reg_polygon() only accepts non-negative values as \ length of a side''' ) return (sides * length**2) / (4 * tan(pi / sides )) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print('[DEMO] Areas of various geometric shapes: \n') print(F"Rectangle: {area_rectangle(1_0, 2_0) = }") print(F"Square: {area_square(1_0) = }") print(F"Triangle: {area_triangle(1_0, 1_0) = }") print(F"Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }") print(F"Parallelogram: {area_parallelogram(1_0, 2_0) = }") print(F"Rhombus: {area_rhombus(1_0, 2_0) = }") print(F"Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }") print(F"Circle: {area_circle(2_0) = }") print(F"Ellipse: {area_ellipse(1_0, 2_0) = }") print('\nSurface Areas of various geometric shapes: \n') print(F"Cube: {surface_area_cube(2_0) = }") print(F"Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }") print(F"Sphere: {surface_area_sphere(2_0) = }") print(F"Hemisphere: {surface_area_hemisphere(2_0) = }") print(F"Cone: {surface_area_cone(1_0, 2_0) = }") print(F"Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }") print(F"Cylinder: {surface_area_cylinder(1_0, 2_0) = }") print(F"Torus: {surface_area_torus(2_0, 1_0) = }") print(F"Equilateral Triangle: {area_reg_polygon(3, 1_0) = }") print(F"Square: {area_reg_polygon(4, 1_0) = }") print(F"Reqular Pentagon: {area_reg_polygon(5, 1_0) = }")
76
1
"""simple docstring""" import os def __UpperCAmelCase ( ): __lowercase : Dict = os.path.join(os.path.dirname(__UpperCamelCase ) , '''num.txt''' ) with open(__UpperCamelCase ) as file_hand: return str(sum(int(__UpperCamelCase ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
76
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): # noqa: E741 while r - l > 1: __lowercase : int = (l + r) // 2 if v[m] >= key: __lowercase : Any = m else: __lowercase : List[Any] = m # noqa: E741 return r def __UpperCAmelCase ( __UpperCamelCase ): if len(__UpperCamelCase ) == 0: return 0 __lowercase : List[str] = [0] * len(__UpperCamelCase ) __lowercase : Any = 1 __lowercase : Dict = v[0] for i in range(1 , len(__UpperCamelCase ) ): if v[i] < tail[0]: __lowercase : Tuple = v[i] elif v[i] > tail[length - 1]: __lowercase : Optional[Any] = v[i] length += 1 else: __lowercase : Dict = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
76
1
"""simple docstring""" import qiskit def __UpperCAmelCase ( __UpperCamelCase = 2 ): __lowercase : List[str] = qubits # Using Aer's simulator __lowercase : Any = qiskit.Aer.get_backend('''aer_simulator''' ) # Creating a Quantum Circuit acting on the q register __lowercase : Optional[int] = qiskit.QuantumCircuit(__UpperCamelCase , __UpperCamelCase ) # Adding a H gate on qubit 0 (now q0 in superposition) circuit.h(0 ) for i in range(1 , __UpperCamelCase ): # Adding CX (CNOT) gate circuit.cx(i - 1 , __UpperCamelCase ) # Mapping the quantum measurement to the classical bits circuit.measure(list(range(__UpperCamelCase ) ) , list(range(__UpperCamelCase ) ) ) # Now measuring any one qubit would affect other qubits to collapse # their super position and have same state as the measured one. # Executing the circuit on the simulator __lowercase : Dict = qiskit.execute(__UpperCamelCase , __UpperCamelCase , shots=10_00 ) return job.result().get_counts(__UpperCamelCase ) if __name__ == "__main__": print(F"Total count for various states are: {quantum_entanglement(3)}")
76
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( __UpperCamelCase = 4 ): __lowercase : Dict = abs(__UpperCamelCase ) or 4 return [[1 + x + y * row_size for x in range(__UpperCamelCase )] for y in range(__UpperCamelCase )] def __UpperCAmelCase ( __UpperCamelCase ): return reverse_row(transpose(__UpperCamelCase ) ) # OR.. transpose(reverse_column(matrix)) def __UpperCAmelCase ( __UpperCamelCase ): return reverse_row(reverse_column(__UpperCamelCase ) ) # OR.. reverse_column(reverse_row(matrix)) def __UpperCAmelCase ( __UpperCamelCase ): return reverse_column(transpose(__UpperCamelCase ) ) # OR.. transpose(reverse_row(matrix)) def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Dict = [list(__UpperCamelCase ) for x in zip(*__UpperCamelCase )] return matrix def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Union[str, Any] = matrix[::-1] return matrix def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Dict = [x[::-1] for x in matrix] return matrix def __UpperCAmelCase ( __UpperCamelCase ): for i in matrix: print(*__UpperCamelCase ) if __name__ == "__main__": a_ = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 90 counterclockwise:\n') print_matrix(rotate_aa(matrix)) a_ = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 180:\n') print_matrix(rotate_aaa(matrix)) a_ = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 270 counterclockwise:\n') print_matrix(rotate_aaa(matrix))
76
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor a_ = logging.get_logger(__name__) class UpperCAmelCase_ ( snake_case ): def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> None: warnings.warn( '''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use LayoutLMv2ImageProcessor instead.''' , UpperCamelCase_ , ) super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
76
"""simple docstring""" import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer a_ = logging.get_logger(__name__) a_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} a_ = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } a_ = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } a_ = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } a_ = { 'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2, 'facebook/dpr-ctx_encoder-multiset-base': 5_1_2, } a_ = { 'facebook/dpr-question_encoder-single-nq-base': 5_1_2, 'facebook/dpr-question_encoder-multiset-base': 5_1_2, } a_ = { 'facebook/dpr-reader-single-nq-base': 5_1_2, 'facebook/dpr-reader-multiset-base': 5_1_2, } a_ = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } a_ = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } a_ = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION a_ = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text'] ) a_ = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) a_ = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the 
tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. 
This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(snake_case ) class UpperCAmelCase_ : def __call__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> BatchEncoding: if titles is None and texts is None: return super().__call__( UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) elif titles is None or texts is None: __lowercase : int = titles if texts is None else texts return super().__call__( UpperCamelCase_ , UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) __lowercase : Optional[int] = titles if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [titles] __lowercase : Optional[int] = texts if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [texts] __lowercase : str = len(UpperCamelCase_ ) __lowercase : List[Any] = questions if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [questions] * n_passages if len(UpperCamelCase_ ) != len(UpperCamelCase_ ): raise ValueError( F"""There should be as many titles than texts but got {len(UpperCamelCase_ )} titles and {len(UpperCamelCase_ )} texts.""" ) __lowercase : int = super().__call__(UpperCamelCase_ , UpperCamelCase_ , 
padding=UpperCamelCase_ , truncation=UpperCamelCase_ )['''input_ids'''] __lowercase : List[Any] = super().__call__(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ )['''input_ids'''] __lowercase : Optional[Any] = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(UpperCamelCase_ , UpperCamelCase_ ) ] } if return_attention_mask is not False: __lowercase : str = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __lowercase : List[str] = attention_mask return self.pad(UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 16 , UpperCamelCase_ = 64 , UpperCamelCase_ = 4 , ) -> List[DPRSpanPrediction]: __lowercase : List[Any] = reader_input['''input_ids'''] __lowercase ,__lowercase ,__lowercase : List[str] = reader_output[:3] __lowercase : Optional[int] = len(UpperCamelCase_ ) __lowercase : Any = sorted(range(UpperCamelCase_ ) , reverse=UpperCamelCase_ , key=relevance_logits.__getitem__ ) __lowercase : List[DPRReaderOutput] = [] for doc_id in sorted_docs: __lowercase : Any = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __lowercase : Tuple = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __lowercase : Optional[Any] = sequence_ids.index(self.pad_token_id ) else: __lowercase : List[Any] = len(UpperCamelCase_ ) __lowercase : List[str] = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCamelCase_ , 
top_spans=UpperCamelCase_ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCamelCase_ , start_index=UpperCamelCase_ , end_index=UpperCamelCase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(UpperCamelCase_ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> List[DPRSpanPrediction]: __lowercase : Tuple = [] for start_index, start_score in enumerate(UpperCamelCase_ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __lowercase : int = sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : x[1] , reverse=UpperCamelCase_ ) __lowercase : Optional[Any] = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" ) __lowercase : Any = end_index - start_index + 1 if length > max_answer_length: raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(UpperCamelCase_ ) == top_spans: break return chosen_span_intervals @add_end_docstrings(snake_case ) class UpperCAmelCase_ ( snake_case , snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =READER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase 
=READER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase =["input_ids", "attention_mask"]
76
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available a_ = { 'configuration_clipseg': [ 'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CLIPSegConfig', 'CLIPSegTextConfig', 'CLIPSegVisionConfig', ], 'processing_clipseg': ['CLIPSegProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST', 'CLIPSegModel', 'CLIPSegPreTrainedModel', 'CLIPSegTextModel', 'CLIPSegVisionModel', 'CLIPSegForImageSegmentation', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
"""simple docstring""" import warnings from ...utils import logging from .image_processing_glpn import GLPNImageProcessor a_ = logging.get_logger(__name__) class UpperCAmelCase_ ( snake_case ): def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> None: warnings.warn( '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use GLPNImageProcessor instead.''' , UpperCamelCase_ , ) super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
76
1
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json', # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class UpperCAmelCase_ ( snake_case ): UpperCamelCase ="altclip_text_model" def __init__( self , UpperCamelCase_=25_00_02 , UpperCamelCase_=10_24 , UpperCamelCase_=24 , UpperCamelCase_=16 , UpperCamelCase_=40_96 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_14 , UpperCamelCase_=1 , UpperCamelCase_=0.0_2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-05 , UpperCamelCase_=1 , UpperCamelCase_=0 , UpperCamelCase_=2 , UpperCamelCase_="absolute" , UpperCamelCase_=True , UpperCamelCase_=7_68 , **UpperCamelCase_ , ) -> List[str]: super().__init__(pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ , **UpperCamelCase_ ) __lowercase : List[str] = vocab_size __lowercase : Optional[int] = hidden_size __lowercase : Optional[Any] = num_hidden_layers __lowercase : Optional[int] = num_attention_heads __lowercase : Optional[Any] = hidden_act __lowercase : int = intermediate_size __lowercase : str = hidden_dropout_prob __lowercase : List[Any] = attention_probs_dropout_prob __lowercase : int = max_position_embeddings __lowercase : Dict = type_vocab_size __lowercase : int = initializer_range __lowercase : Optional[int] = initializer_factor __lowercase : Union[str, Any] = layer_norm_eps __lowercase : int = position_embedding_type __lowercase : Optional[Any] = use_cache __lowercase : Optional[Any] = project_dim class UpperCAmelCase_ ( snake_case ): UpperCamelCase ="altclip_vision_model" def __init__( self , UpperCamelCase_=7_68 , UpperCamelCase_=30_72 , UpperCamelCase_=5_12 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=3 , UpperCamelCase_=2_24 , 
UpperCamelCase_=32 , UpperCamelCase_="quick_gelu" , UpperCamelCase_=1E-5 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1.0 , **UpperCamelCase_ , ) -> Optional[int]: super().__init__(**UpperCamelCase_ ) __lowercase : Tuple = hidden_size __lowercase : Optional[int] = intermediate_size __lowercase : int = projection_dim __lowercase : Tuple = num_hidden_layers __lowercase : str = num_attention_heads __lowercase : str = num_channels __lowercase : int = patch_size __lowercase : List[str] = image_size __lowercase : Optional[int] = initializer_range __lowercase : Union[str, Any] = initializer_factor __lowercase : Optional[int] = attention_dropout __lowercase : str = layer_norm_eps __lowercase : Dict = hidden_act @classmethod def _lowerCamelCase ( cls , UpperCamelCase_ , **UpperCamelCase_ ) -> "PretrainedConfig": cls._set_token_in_kwargs(UpperCamelCase_ ) __lowercase ,__lowercase : Optional[Any] = cls.get_config_dict(UpperCamelCase_ , **UpperCamelCase_ ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get('''model_type''' ) == "altclip": __lowercase : int = config_dict['''vision_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" ) return cls.from_dict(UpperCamelCase_ , **UpperCamelCase_ ) class UpperCAmelCase_ ( snake_case ): UpperCamelCase ="altclip" UpperCamelCase =True def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=7_68 , UpperCamelCase_=2.6_5_9_2 , **UpperCamelCase_ ) -> Optional[int]: # If `_config_dict` exist, we use them for the backward compatibility. # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot # of confusion!). 
__lowercase : Optional[Any] = kwargs.pop('''text_config_dict''' , UpperCamelCase_ ) __lowercase : Tuple = kwargs.pop('''vision_config_dict''' , UpperCamelCase_ ) super().__init__(**UpperCamelCase_ ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: __lowercase : Any = {} # This is the complete result when using `text_config_dict`. __lowercase : Any = AltCLIPTextConfig(**UpperCamelCase_ ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: __lowercase : Union[str, Any] = ( F"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """ F"""The value `text_config_dict[\"{key}\"]` will be used instead.""" ) # If inferred from default argument values (just to be super careful) else: __lowercase : Tuple = ( F"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """ F"""value `text_config[\"{key}\"]` will be overriden.""" ) logger.warning(UpperCamelCase_ ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: __lowercase : Optional[Any] = {} # This is the complete result when using `vision_config_dict`. 
__lowercase : List[str] = AltCLIPVisionConfig(**UpperCamelCase_ ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: __lowercase : Any = { str(UpperCamelCase_ ): value for key, value in _vision_config_dict['''id2label'''].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: __lowercase : Union[str, Any] = ( F"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """ F"""values. The value `vision_config_dict[\"{key}\"]` will be used instead.""" ) # If inferred from default argument values (just to be super careful) else: __lowercase : Optional[int] = ( F"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. """ F"""The value `vision_config[\"{key}\"]` will be overriden.""" ) logger.warning(UpperCamelCase_ ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: __lowercase : Optional[int] = {} logger.info('''`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.''' ) if vision_config is None: __lowercase : Tuple = {} logger.info('''`vision_config` is `None`. 
initializing the `AltCLIPVisionConfig` with default values.''' ) __lowercase : Union[str, Any] = AltCLIPTextConfig(**UpperCamelCase_ ) __lowercase : List[Any] = AltCLIPVisionConfig(**UpperCamelCase_ ) __lowercase : str = projection_dim __lowercase : str = logit_scale_init_value __lowercase : List[Any] = 1.0 @classmethod def _lowerCamelCase ( cls , UpperCamelCase_ , UpperCamelCase_ , **UpperCamelCase_ ) -> Optional[int]: return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase_ ) def _lowerCamelCase ( self ) -> Optional[int]: __lowercase : List[Any] = copy.deepcopy(self.__dict__ ) __lowercase : Optional[Any] = self.text_config.to_dict() __lowercase : int = self.vision_config.to_dict() __lowercase : Optional[int] = self.__class__.model_type return output
76
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def __UpperCAmelCase ( __UpperCamelCase ): # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): __lowercase : Any = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue __lowercase : Dict = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' ) __lowercase : Dict = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' ) __lowercase : Dict = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' ) __lowercase : Tuple = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' ) __lowercase : Dict = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' ) __lowercase : Optional[int] = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' ) __lowercase : Optional[int] = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' ) __lowercase : Union[str, Any] = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' ) __lowercase : str = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' ) __lowercase : Dict = key.replace('''image_encoder.module''' , '''flava.image_model''' ) __lowercase : str = key.replace('''text_encoder.module''' , '''flava.text_model''' ) __lowercase : Dict = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' ) __lowercase : Union[str, Any] = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' ) __lowercase : List[str] = key.replace('''text_projection''' , '''flava.text_projection''' 
) __lowercase : Any = key.replace('''image_projection''' , '''flava.image_projection''' ) __lowercase : Tuple = value.float() for key, value in codebook_state_dict.items(): __lowercase : int = value return upgrade @torch.no_grad() def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None ): if config_path is not None: __lowercase : Union[str, Any] = FlavaConfig.from_pretrained(__UpperCamelCase ) else: __lowercase : Union[str, Any] = FlavaConfig() __lowercase : Any = FlavaForPreTraining(__UpperCamelCase ).eval() __lowercase : Any = convert_dalle_checkpoint(__UpperCamelCase , __UpperCamelCase , save_checkpoint=__UpperCamelCase ) if os.path.exists(__UpperCamelCase ): __lowercase : Optional[Any] = torch.load(__UpperCamelCase , map_location='''cpu''' ) else: __lowercase : List[Any] = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location='''cpu''' ) __lowercase : Optional[int] = upgrade_state_dict(__UpperCamelCase , __UpperCamelCase ) hf_model.load_state_dict(__UpperCamelCase ) __lowercase : Union[str, Any] = hf_model.state_dict() __lowercase : Optional[Any] = count_parameters(__UpperCamelCase ) __lowercase : List[Any] = count_parameters(__UpperCamelCase ) + count_parameters(__UpperCamelCase ) assert torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) hf_model.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') a_ = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, 
args.config_path)
76
1
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from importlib import import_module from typing import Dict, List, Optional, Tuple import numpy as np from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score from torch import nn from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorWithPadding, EvalPrediction, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import is_main_process a_ = logging.getLogger(__name__) @dataclass class UpperCAmelCase_ : UpperCamelCase =field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) UpperCamelCase =field( default=snake_case , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) UpperCamelCase =field( default="NER" , metadata={"help": "Task type to fine tune in training (e.g. NER, POS, etc)"} ) UpperCamelCase =field( default=snake_case , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) UpperCamelCase =field(default=snake_case , metadata={"help": "Set this flag to use fast tokenization."} ) # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script, # or just modify its tokenizer_config.json. UpperCamelCase =field( default=snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , ) @dataclass class UpperCAmelCase_ : UpperCamelCase =field( metadata={"help": "The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."} ) UpperCamelCase =field( default=snake_case , metadata={"help": "Path to a file containing all labels. 
If not specified, CoNLL-2003 labels are used."} , ) UpperCamelCase =field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) UpperCamelCase =field( default=snake_case , metadata={"help": "Overwrite the cached training and evaluation sets"} ) def __UpperCAmelCase ( ): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. __lowercase : Any = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. __lowercase ,__lowercase ,__lowercase : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: __lowercase ,__lowercase ,__lowercase : List[str] = parser.parse_args_into_dataclasses() if ( os.path.exists(training_args.output_dir ) and os.listdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use""" ''' --overwrite_output_dir to overcome.''' ) __lowercase : int = import_module('''tasks''' ) try: __lowercase : List[Any] = getattr(__UpperCamelCase , model_args.task_type ) __lowercase : TokenClassificationTask = token_classification_task_clazz() except AttributeError: raise ValueError( f"""Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. 
""" f"""Available tasks classes are: {TokenClassificationTask.__subclasses__()}""" ) # Setup logging logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( '''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info('''Training/evaluation parameters %s''' , __UpperCamelCase ) # Set seed set_seed(training_args.seed ) # Prepare CONLL-2003 task __lowercase : Dict = token_classification_task.get_labels(data_args.labels ) __lowercase : Dict[int, str] = dict(enumerate(__UpperCamelCase ) ) __lowercase : Optional[int] = len(__UpperCamelCase ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. 
__lowercase : int = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCamelCase , idalabel=__UpperCamelCase , labelaid={label: i for i, label in enumerate(__UpperCamelCase )} , cache_dir=model_args.cache_dir , ) __lowercase : str = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , ) __lowercase : Dict = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=__UpperCamelCase , cache_dir=model_args.cache_dir , ) # Get datasets __lowercase : List[str] = ( TokenClassificationDataset( token_classification_task=__UpperCamelCase , data_dir=data_args.data_dir , tokenizer=__UpperCamelCase , labels=__UpperCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , ) if training_args.do_train else None ) __lowercase : List[str] = ( TokenClassificationDataset( token_classification_task=__UpperCamelCase , data_dir=data_args.data_dir , tokenizer=__UpperCamelCase , labels=__UpperCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , ) if training_args.do_eval else None ) def align_predictions(__UpperCamelCase , __UpperCamelCase ) -> Tuple[List[int], List[int]]: __lowercase : int = np.argmax(__UpperCamelCase , axis=2 ) __lowercase ,__lowercase : Optional[Any] = preds.shape __lowercase : Any = [[] for _ in range(__UpperCamelCase )] __lowercase : List[str] = [[] for _ in range(__UpperCamelCase )] for i in range(__UpperCamelCase ): for j in range(__UpperCamelCase ): if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index: out_label_list[i].append(label_map[label_ids[i][j]] ) 
preds_list[i].append(label_map[preds[i][j]] ) return preds_list, out_label_list def compute_metrics(__UpperCamelCase ) -> Dict: __lowercase ,__lowercase : str = align_predictions(p.predictions , p.label_ids ) return { "accuracy_score": accuracy_score(__UpperCamelCase , __UpperCamelCase ), "precision": precision_score(__UpperCamelCase , __UpperCamelCase ), "recall": recall_score(__UpperCamelCase , __UpperCamelCase ), "f1": fa_score(__UpperCamelCase , __UpperCamelCase ), } # Data collator __lowercase : Tuple = DataCollatorWithPadding(__UpperCamelCase , pad_to_multiple_of=8 ) if training_args.fpaa else None # Initialize our Trainer __lowercase : Tuple = Trainer( model=__UpperCamelCase , args=__UpperCamelCase , train_dataset=__UpperCamelCase , eval_dataset=__UpperCamelCase , compute_metrics=__UpperCamelCase , data_collator=__UpperCamelCase , ) # Training if training_args.do_train: trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) trainer.save_model() # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) if trainer.is_world_process_zero(): tokenizer.save_pretrained(training_args.output_dir ) # Evaluation __lowercase : Union[str, Any] = {} if training_args.do_eval: logger.info('''*** Evaluate ***''' ) __lowercase : List[Any] = trainer.evaluate() __lowercase : Optional[Any] = os.path.join(training_args.output_dir , '''eval_results.txt''' ) if trainer.is_world_process_zero(): with open(__UpperCamelCase , '''w''' ) as writer: logger.info('''***** Eval results *****''' ) for key, value in result.items(): logger.info(''' %s = %s''' , __UpperCamelCase , __UpperCamelCase ) writer.write('''%s = %s\n''' % (key, value) ) results.update(__UpperCamelCase ) # Predict if training_args.do_predict: __lowercase : List[str] = TokenClassificationDataset( token_classification_task=__UpperCamelCase , data_dir=data_args.data_dir , 
tokenizer=__UpperCamelCase , labels=__UpperCamelCase , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , ) __lowercase ,__lowercase ,__lowercase : Any = trainer.predict(__UpperCamelCase ) __lowercase ,__lowercase : int = align_predictions(__UpperCamelCase , __UpperCamelCase ) __lowercase : Tuple = os.path.join(training_args.output_dir , '''test_results.txt''' ) if trainer.is_world_process_zero(): with open(__UpperCamelCase , '''w''' ) as writer: for key, value in metrics.items(): logger.info(''' %s = %s''' , __UpperCamelCase , __UpperCamelCase ) writer.write('''%s = %s\n''' % (key, value) ) # Save predictions __lowercase : Optional[int] = os.path.join(training_args.output_dir , '''test_predictions.txt''' ) if trainer.is_world_process_zero(): with open(__UpperCamelCase , '''w''' ) as writer: with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f: token_classification_task.write_predictions_to_file(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) return results def __UpperCAmelCase ( __UpperCamelCase ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
76
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging a_ = logging.get_logger(__name__) class UpperCAmelCase_ ( snake_case ): UpperCamelCase =["pixel_values"] def __init__( self , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = PILImageResampling.BILINEAR , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = 1 / 2_55 , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> None: super().__init__(**UpperCamelCase_ ) __lowercase : List[str] = size if size is not None else {'''shortest_edge''': 2_56} __lowercase : Dict = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) __lowercase : Optional[Any] = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} __lowercase : Dict = get_size_dict(UpperCamelCase_ ) __lowercase : Dict = do_resize __lowercase : Optional[Any] = size __lowercase : List[Any] = resample __lowercase : Dict = do_center_crop __lowercase : Any = crop_size __lowercase : List[str] = do_rescale __lowercase : List[str] = rescale_factor __lowercase : Optional[Any] = do_normalize __lowercase : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __lowercase : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = PILImageResampling.BICUBIC , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray: __lowercase : List[Any] = 
get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) __lowercase : List[Any] = get_resize_output_image_size(UpperCamelCase_ , size=size['''shortest_edge'''] , default_to_square=UpperCamelCase_ ) return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray: __lowercase : Union[str, Any] = get_size_dict(UpperCamelCase_ ) return center_crop(UpperCamelCase_ , size=(size['''height'''], size['''width''']) , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ ) -> np.ndarray: return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray: return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = ChannelDimension.FIRST , **UpperCamelCase_ , ) -> Optional[Any]: __lowercase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize __lowercase : Tuple = size if size is not None else self.size __lowercase : Optional[Any] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) __lowercase : int = resample if 
resample is not None else self.resample __lowercase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop __lowercase : List[str] = crop_size if crop_size is not None else self.crop_size __lowercase : List[str] = get_size_dict(UpperCamelCase_ ) __lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __lowercase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor __lowercase : Dict = do_normalize if do_normalize is not None else self.do_normalize __lowercase : Tuple = image_mean if image_mean is not None else self.image_mean __lowercase : Any = image_std if image_std is not None else self.image_std __lowercase : Any = make_list_of_images(UpperCamelCase_ ) if not valid_images(UpperCamelCase_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
__lowercase : Optional[int] = [to_numpy_array(UpperCamelCase_ ) for image in images] if do_resize: __lowercase : Tuple = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images] if do_center_crop: __lowercase : Any = [self.center_crop(image=UpperCamelCase_ , size=UpperCamelCase_ ) for image in images] if do_rescale: __lowercase : str = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images] if do_normalize: __lowercase : Optional[int] = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images] __lowercase : str = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images] __lowercase : Optional[Any] = {'''pixel_values''': images} return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
76
1
"""simple docstring""" import warnings from ...utils import logging from .image_processing_donut import DonutImageProcessor a_ = logging.get_logger(__name__) class UpperCAmelCase_ ( snake_case ): def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> None: warnings.warn( '''The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use DonutImageProcessor instead.''' , UpperCamelCase_ , ) super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
76
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if digit_amount > 0: return round(number - int(__UpperCamelCase ) , __UpperCamelCase ) return number - int(__UpperCamelCase ) if __name__ == "__main__": print(decimal_isolate(1.53, 0)) print(decimal_isolate(35.345, 1)) print(decimal_isolate(35.345, 2)) print(decimal_isolate(35.345, 3)) print(decimal_isolate(-14.789, 3)) print(decimal_isolate(0, 2)) print(decimal_isolate(-14.123, 1)) print(decimal_isolate(-14.123, 2)) print(decimal_isolate(-14.123, 3))
76
1
"""simple docstring""" from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available from ...utils import OptionalDependencyNotAvailable a_ = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']} try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['GPTNeoXTokenizerFast'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST', 'GPTNeoXForCausalLM', 'GPTNeoXForQuestionAnswering', 'GPTNeoXForSequenceClassification', 'GPTNeoXForTokenClassification', 'GPTNeoXLayer', 'GPTNeoXModel', 'GPTNeoXPreTrainedModel', ] if TYPE_CHECKING: from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_gpt_neox import ( GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST, GPTNeoXForCausalLM, GPTNeoXForQuestionAnswering, GPTNeoXForSequenceClassification, GPTNeoXForTokenClassification, GPTNeoXLayer, GPTNeoXModel, GPTNeoXPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : set[int] = set() # To detect a back edge, keep track of vertices currently in the recursion stack __lowercase : set[int] = set() return any( node not in visited and depth_first_search(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) for node in graph ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): visited.add(__UpperCamelCase ) rec_stk.add(__UpperCamelCase ) for node in graph[vertex]: if node not in visited: if depth_first_search(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(__UpperCamelCase ) return False if __name__ == "__main__": from doctest import testmod testmod()
76
1
"""simple docstring""" import argparse import re import numpy as np import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( SamConfig, SamImageProcessor, SamModel, SamProcessor, SamVisionConfig, ) a_ = { 'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in', 'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0', 'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out', 'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1', 'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm', 'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2', 'mask_downscaling.0': 'mask_embed.conv1', 'mask_downscaling.1': 'mask_embed.layer_norm1', 'mask_downscaling.3': 'mask_embed.conv2', 'mask_downscaling.4': 'mask_embed.layer_norm2', 'mask_downscaling.6': 'mask_embed.conv3', 'point_embeddings': 'point_embed', 'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding', 'image_encoder': 'vision_encoder', 'neck.0': 'neck.conv1', 'neck.1': 'neck.layer_norm1', 'neck.2': 'neck.conv2', 'neck.3': 'neck.layer_norm2', 'patch_embed.proj': 'patch_embed.projection', '.norm': '.layer_norm', 'blocks': 'layers', } def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Optional[int] = {} state_dict.pop('''pixel_mean''' , __UpperCamelCase ) state_dict.pop('''pixel_std''' , __UpperCamelCase ) __lowercase : List[str] = R'''.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*''' for key, value in state_dict.items(): for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: __lowercase : Any = key.replace(__UpperCamelCase , __UpperCamelCase ) if re.match(__UpperCamelCase , __UpperCamelCase ): __lowercase : Union[str, Any] = int(re.match(__UpperCamelCase , __UpperCamelCase ).group(2 ) ) if layer_nb == 0: __lowercase : Union[str, Any] = key.replace('''layers.0''' , '''proj_in''' ) elif layer_nb == 1: __lowercase : List[Any] = 
key.replace('''layers.1''' , '''layers.0''' ) elif layer_nb == 2: __lowercase : int = key.replace('''layers.2''' , '''proj_out''' ) __lowercase : Tuple = value __lowercase : int = model_state_dict[ '''prompt_encoder.shared_embedding.positional_embedding''' ] return model_state_dict def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="ybelkada/segment-anything" ): __lowercase : int = hf_hub_download(__UpperCamelCase , f"""checkpoints/{model_name}.pth""" ) if "sam_vit_b" in model_name: __lowercase : int = SamConfig() elif "sam_vit_l" in model_name: __lowercase : Dict = SamVisionConfig( hidden_size=10_24 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , ) __lowercase : List[Any] = SamConfig( vision_config=__UpperCamelCase , ) elif "sam_vit_h" in model_name: __lowercase : Dict = SamVisionConfig( hidden_size=12_80 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , ) __lowercase : List[str] = SamConfig( vision_config=__UpperCamelCase , ) __lowercase : List[Any] = torch.load(__UpperCamelCase , map_location='''cpu''' ) __lowercase : List[Any] = replace_keys(__UpperCamelCase ) __lowercase : Optional[Any] = SamImageProcessor() __lowercase : Union[str, Any] = SamProcessor(image_processor=__UpperCamelCase ) __lowercase : List[Any] = SamModel(__UpperCamelCase ) hf_model.load_state_dict(__UpperCamelCase ) __lowercase : Dict = hf_model.to('''cuda''' ) __lowercase : List[str] = '''https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png''' __lowercase : List[str] = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ).convert('''RGB''' ) __lowercase : Union[str, Any] = [[[4_00, 6_50]]] __lowercase : List[str] = [[1]] __lowercase : Optional[int] = processor(images=np.array(__UpperCamelCase ) , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): __lowercase : Dict = hf_model(**__UpperCamelCase ) __lowercase : 
List[str] = output.iou_scores.squeeze() if model_name == "sam_vit_h_4b8939": assert scores[-1].item() == 0.579_890_251_159_668 __lowercase : Optional[int] = processor( images=np.array(__UpperCamelCase ) , input_points=__UpperCamelCase , input_labels=__UpperCamelCase , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): __lowercase : str = hf_model(**__UpperCamelCase ) __lowercase : Optional[Any] = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_712_603_092_193_604 __lowercase : List[str] = ((75, 2_75, 17_25, 8_50),) __lowercase : int = processor(images=np.array(__UpperCamelCase ) , input_boxes=__UpperCamelCase , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): __lowercase : str = hf_model(**__UpperCamelCase ) __lowercase : Optional[Any] = output.iou_scores.squeeze() assert scores[-1].item() == 0.8_686_015_605_926_514 # Test with 2 points and 1 image. __lowercase : Optional[Any] = [[[4_00, 6_50], [8_00, 6_50]]] __lowercase : Union[str, Any] = [[1, 1]] __lowercase : str = processor( images=np.array(__UpperCamelCase ) , input_points=__UpperCamelCase , input_labels=__UpperCamelCase , return_tensors='''pt''' ).to('''cuda''' ) with torch.no_grad(): __lowercase : Optional[int] = hf_model(**__UpperCamelCase ) __lowercase : Any = output.iou_scores.squeeze() assert scores[-1].item() == 0.9_936_047_792_434_692 if __name__ == "__main__": a_ = argparse.ArgumentParser() a_ = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195'] parser.add_argument( '--model_name', default='sam_vit_h_4b8939', choices=choices, type=str, help='Path to hf config.json of model to convert', ) parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument( '--push_to_hub', action='store_true', help='Whether to push the model and processor to the hub after converting', ) parser.add_argument( '--model_hub_id', default='ybelkada/segment-anything', choices=choices, type=str, help='Path to 
hf config.json of model to convert', ) a_ = parser.parse_args() convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
76
"""simple docstring""" import logging import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEncoder, BertModel, BertPreTrainedModel, ) a_ = logging.getLogger(__name__) class UpperCAmelCase_ ( snake_case ): def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None ) -> Optional[Any]: __lowercase : Tuple = self.layer[current_layer](UpperCamelCase_ , UpperCamelCase_ , head_mask[current_layer] ) __lowercase : Any = layer_outputs[0] return hidden_states @add_start_docstrings( "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , snake_case , ) class UpperCAmelCase_ ( snake_case ): def __init__( self , UpperCamelCase_ ) -> int: super().__init__(UpperCamelCase_ ) __lowercase : Optional[Any] = BertEncoderWithPabee(UpperCamelCase_ ) self.init_weights() __lowercase : str = 0 __lowercase : Optional[Any] = 0 __lowercase : Optional[int] = 0 __lowercase : int = 0 def _lowerCamelCase ( self , UpperCamelCase_ ) -> Dict: __lowercase : Tuple = threshold def _lowerCamelCase ( self , UpperCamelCase_ ) -> Union[str, Any]: __lowercase : Optional[int] = patience def _lowerCamelCase ( self ) -> List[str]: __lowercase : Tuple = 0 __lowercase : Tuple = 0 def _lowerCamelCase ( self ) -> List[Any]: __lowercase : Optional[int] = self.inference_layers_num / self.inference_instances_num __lowercase : int = ( F"""*** Patience = {self.patience} Avg. 
Inference Layers = {avg_inf_layers:.2f} Speed Up =""" F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***""" ) print(UpperCamelCase_ ) @add_start_docstrings_to_model_forward(UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=False , ) -> Union[str, Any]: if input_ids is not None and inputs_embeds is not None: raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' ) elif input_ids is not None: __lowercase : Tuple = input_ids.size() elif inputs_embeds is not None: __lowercase : List[Any] = inputs_embeds.size()[:-1] else: raise ValueError('''You have to specify either input_ids or inputs_embeds''' ) __lowercase : int = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: __lowercase : Dict = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ ) if token_type_ids is None: __lowercase : int = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
__lowercase : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: __lowercase ,__lowercase ,__lowercase : Optional[int] = encoder_hidden_states.size() __lowercase : Any = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: __lowercase : List[str] = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ ) __lowercase : Tuple = self.invert_attention_mask(UpperCamelCase_ ) else: __lowercase : Tuple = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] __lowercase : Optional[int] = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers ) __lowercase : Optional[int] = self.embeddings( input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ ) __lowercase : Union[str, Any] = embedding_output if self.training: __lowercase : List[Any] = [] for i in range(self.config.num_hidden_layers ): __lowercase : str = self.encoder.adaptive_forward( UpperCamelCase_ , current_layer=UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ ) __lowercase : int = self.pooler(UpperCamelCase_ ) __lowercase : str = output_layers[i](output_dropout(UpperCamelCase_ ) ) res.append(UpperCamelCase_ ) elif self.patience == 0: # Use all layers for inference __lowercase : int = self.encoder( UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , ) __lowercase : 
Optional[Any] = self.pooler(encoder_outputs[0] ) __lowercase : int = [output_layers[self.config.num_hidden_layers - 1](UpperCamelCase_ )] else: __lowercase : Optional[int] = 0 __lowercase : Union[str, Any] = None __lowercase : int = 0 for i in range(self.config.num_hidden_layers ): calculated_layer_num += 1 __lowercase : Tuple = self.encoder.adaptive_forward( UpperCamelCase_ , current_layer=UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ ) __lowercase : Dict = self.pooler(UpperCamelCase_ ) __lowercase : Optional[int] = output_layers[i](UpperCamelCase_ ) if regression: __lowercase : Any = logits.detach() if patient_result is not None: __lowercase : List[str] = patient_result.detach() if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold: patient_counter += 1 else: __lowercase : int = 0 else: __lowercase : List[str] = logits.detach().argmax(dim=1 ) if patient_result is not None: __lowercase : Optional[Any] = patient_result.detach().argmax(dim=1 ) if (patient_result is not None) and torch.all(labels.eq(UpperCamelCase_ ) ): patient_counter += 1 else: __lowercase : Tuple = 0 __lowercase : Union[str, Any] = logits if patient_counter == self.patience: break __lowercase : Optional[int] = [patient_result] self.inference_layers_num += calculated_layer_num self.inference_instances_num += 1 return res @add_start_docstrings( "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. 
" , snake_case , ) class UpperCAmelCase_ ( snake_case ): def __init__( self , UpperCamelCase_ ) -> Optional[Any]: super().__init__(UpperCamelCase_ ) __lowercase : List[Any] = config.num_labels __lowercase : int = BertModelWithPabee(UpperCamelCase_ ) __lowercase : int = nn.Dropout(config.hidden_dropout_prob ) __lowercase : Union[str, Any] = nn.ModuleList( [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] ) self.init_weights() @add_start_docstrings_to_model_forward(UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , ) -> int: __lowercase : Union[str, Any] = self.bert( input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , ) __lowercase : List[str] = (logits[-1],) if labels is not None: __lowercase : Any = None __lowercase : Optional[int] = 0 for ix, logits_item in enumerate(UpperCamelCase_ ): if self.num_labels == 1: # We are doing regression __lowercase : Any = MSELoss() __lowercase : Any = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) ) else: __lowercase : str = CrossEntropyLoss() __lowercase : Dict = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) ) if total_loss is None: __lowercase : List[str] = loss else: total_loss += loss * (ix + 1) total_weights += ix + 1 __lowercase : Union[str, Any] = (total_loss / total_weights,) + outputs return outputs
76
1
"""simple docstring""" from dataclasses import dataclass, field from typing import Tuple from ..utils import cached_property, is_tf_available, logging, requires_backends from .benchmark_args_utils import BenchmarkArguments if is_tf_available(): import tensorflow as tf a_ = logging.get_logger(__name__) @dataclass class UpperCAmelCase_ ( snake_case ): UpperCamelCase =[ "no_inference", "no_cuda", "no_tpu", "no_speed", "no_memory", "no_env_print", "no_multi_process", ] def __init__( self , **UpperCamelCase_ ) -> Union[str, Any]: for deprecated_arg in self.deprecated_args: if deprecated_arg in kwargs: __lowercase : Tuple = deprecated_arg[3:] __lowercase : Any = not kwargs.pop(UpperCamelCase_ ) logger.warning( F"""{deprecated_arg} is depreciated. Please use --no-{positive_arg} or""" F""" {positive_arg}={kwargs[positive_arg]}""" ) __lowercase : List[Any] = kwargs.pop('''tpu_name''' , self.tpu_name ) __lowercase : Union[str, Any] = kwargs.pop('''device_idx''' , self.device_idx ) __lowercase : Optional[int] = kwargs.pop('''eager_mode''' , self.eager_mode ) __lowercase : Optional[int] = kwargs.pop('''use_xla''' , self.use_xla ) super().__init__(**UpperCamelCase_ ) UpperCamelCase =field( default=snake_case , metadata={"help": "Name of TPU"} , ) UpperCamelCase =field( default=0 , metadata={"help": "CPU / GPU device index. Defaults to 0."} , ) UpperCamelCase =field(default=snake_case , metadata={"help": "Benchmark models in eager model."} ) UpperCamelCase =field( default=snake_case , metadata={ "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`." 
} , ) @cached_property def _lowerCamelCase ( self ) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ['''tf'''] ) __lowercase : Optional[Any] = None if self.tpu: try: if self.tpu_name: __lowercase : Tuple = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name ) else: __lowercase : List[str] = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: __lowercase : List[str] = None return tpu @cached_property def _lowerCamelCase ( self ) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]: requires_backends(self , ['''tf'''] ) if self.is_tpu: tf.config.experimental_connect_to_cluster(self._setup_tpu ) tf.tpu.experimental.initialize_tpu_system(self._setup_tpu ) __lowercase : Any = tf.distribute.TPUStrategy(self._setup_tpu ) else: # currently no multi gpu is allowed if self.is_gpu: # TODO: Currently only single GPU is supported tf.config.set_visible_devices(self.gpu_list[self.device_idx] , '''GPU''' ) __lowercase : Dict = tf.distribute.OneDeviceStrategy(device=F"""/gpu:{self.device_idx}""" ) else: tf.config.set_visible_devices([] , '''GPU''' ) # disable GPU __lowercase : str = tf.distribute.OneDeviceStrategy(device=F"""/cpu:{self.device_idx}""" ) return strategy @property def _lowerCamelCase ( self ) -> bool: requires_backends(self , ['''tf'''] ) return self._setup_tpu is not None @property def _lowerCamelCase ( self ) -> "tf.distribute.Strategy": requires_backends(self , ['''tf'''] ) return self._setup_strategy @property def _lowerCamelCase ( self ) -> Optional[int]: requires_backends(self , ['''tf'''] ) return tf.config.list_physical_devices('''GPU''' ) @property def _lowerCamelCase ( self ) -> int: requires_backends(self , ['''tf'''] ) if self.cuda: return len(self.gpu_list ) return 0 @property def _lowerCamelCase ( self ) -> bool: return self.n_gpu > 0
76
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() a_ = logging.get_logger(__name__) a_ = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): for attribute in key.split('''.''' ): __lowercase : str = getattr(__UpperCamelCase , __UpperCamelCase ) if weight_type is not None: __lowercase : int = getattr(__UpperCamelCase , __UpperCamelCase ).shape else: __lowercase : int = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowercase : List[str] = value elif weight_type == "weight_g": __lowercase : Optional[Any] = value elif weight_type == "weight_v": __lowercase : Tuple = value elif weight_type == "bias": __lowercase : Dict = value else: __lowercase : Union[str, Any] = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : Tuple = [] __lowercase : Union[str, Any] = fairseq_model.state_dict() __lowercase : Optional[Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): __lowercase : Union[str, Any] = False if "conv_layers" in name: load_conv_layer( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , hf_model.config.feat_extract_norm == '''group''' , ) __lowercase : List[str] = True else: for key, mapped_key in MAPPING.items(): __lowercase : List[str] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned): __lowercase : int = True if "*" in mapped_key: __lowercase : Union[str, Any] = name.split(__UpperCamelCase )[0].split('''.''' )[-2] __lowercase : Tuple = mapped_key.replace('''*''' , __UpperCamelCase ) if "weight_g" in name: __lowercase : Tuple = '''weight_g''' elif "weight_v" in name: __lowercase : Optional[int] = '''weight_v''' elif "weight" in name: __lowercase : str = '''weight''' elif "bias" in name: __lowercase : Optional[int] = '''bias''' else: __lowercase : List[str] = None set_recursively(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) continue if not is_used: unused_weights.append(__UpperCamelCase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : List[Any] = full_name.split('''conv_layers.''' )[-1] __lowercase : str = name.split('''.''' ) __lowercase : Dict = int(items[0] ) __lowercase : Any = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == 
feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowercase : List[str] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowercase : Tuple = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) __lowercase : Union[str, Any] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowercase : Tuple = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__UpperCamelCase ) @torch.no_grad() def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=True ): if config_path is not None: __lowercase : Dict = HubertConfig.from_pretrained(__UpperCamelCase ) else: __lowercase : str = HubertConfig() if is_finetuned: if dict_path: __lowercase : Tuple = Dictionary.load(__UpperCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __lowercase : int = target_dict.pad_index __lowercase : Union[str, Any] = target_dict.bos_index __lowercase : int = target_dict.eos_index __lowercase : int = len(target_dict.symbols ) __lowercase : Dict = os.path.join(__UpperCamelCase , '''vocab.json''' ) if not os.path.isdir(__UpperCamelCase ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__UpperCamelCase ) ) return os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , __UpperCamelCase ) __lowercase : str = WavaVecaCTCTokenizer( __UpperCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__UpperCamelCase , ) __lowercase : str = True if config.feat_extract_norm == '''layer''' else False __lowercase : Any = 
WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__UpperCamelCase , return_attention_mask=__UpperCamelCase , ) __lowercase : Union[str, Any] = WavaVecaProcessor(feature_extractor=__UpperCamelCase , tokenizer=__UpperCamelCase ) processor.save_pretrained(__UpperCamelCase ) __lowercase : Optional[Any] = HubertForCTC(__UpperCamelCase ) else: __lowercase : Union[str, Any] = HubertModel(__UpperCamelCase ) if is_finetuned: __lowercase ,__lowercase ,__lowercase : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __lowercase ,__lowercase ,__lowercase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) __lowercase : Union[str, Any] = model[0].eval() recursively_load_weights(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) hf_wavavec.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) a_ = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
76
1
"""simple docstring""" import unittest import numpy as np from transformers.testing_utils import is_flaky, require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DonutImageProcessor class UpperCAmelCase_ ( unittest.TestCase ): def __init__( self , UpperCamelCase_ , UpperCamelCase_=7 , UpperCamelCase_=3 , UpperCamelCase_=18 , UpperCamelCase_=30 , UpperCamelCase_=4_00 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_=False , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=[0.5, 0.5, 0.5] , UpperCamelCase_=[0.5, 0.5, 0.5] , ) -> Union[str, Any]: __lowercase : Dict = parent __lowercase : int = batch_size __lowercase : Union[str, Any] = num_channels __lowercase : Optional[Any] = image_size __lowercase : Tuple = min_resolution __lowercase : Union[str, Any] = max_resolution __lowercase : Any = do_resize __lowercase : List[Any] = size if size is not None else {'''height''': 18, '''width''': 20} __lowercase : Any = do_thumbnail __lowercase : str = do_align_axis __lowercase : Optional[Any] = do_pad __lowercase : Optional[Any] = do_normalize __lowercase : Tuple = image_mean __lowercase : Tuple = image_std def _lowerCamelCase ( self ) -> int: return { "do_resize": self.do_resize, "size": self.size, "do_thumbnail": self.do_thumbnail, "do_align_long_axis": self.do_align_axis, "do_pad": self.do_pad, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, } @require_torch @require_vision class UpperCAmelCase_ ( snake_case , unittest.TestCase ): UpperCamelCase =DonutImageProcessor if is_vision_available() else None def _lowerCamelCase ( self ) -> Any: __lowercase : Dict = DonutImageProcessingTester(self ) @property def _lowerCamelCase ( self ) 
-> int: return self.image_processor_tester.prepare_image_processor_dict() def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : Dict = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_resize''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''size''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_thumbnail''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_align_long_axis''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_pad''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''do_normalize''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''image_mean''' ) ) self.assertTrue(hasattr(UpperCamelCase_ , '''image_std''' ) ) def _lowerCamelCase ( self ) -> Union[str, Any]: __lowercase : int = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''height''': 18, '''width''': 20} ) __lowercase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 ) self.assertEqual(image_processor.size , {'''height''': 42, '''width''': 42} ) # Previous config had dimensions in (width, height) order __lowercase : Any = self.image_processing_class.from_dict(self.image_processor_dict , size=(42, 84) ) self.assertEqual(image_processor.size , {'''height''': 84, '''width''': 42} ) def _lowerCamelCase ( self ) -> Dict: pass @is_flaky() def _lowerCamelCase ( self ) -> Optional[Any]: # Initialize image_processing __lowercase : Optional[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __lowercase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , Image.Image ) # Test not batched input __lowercase : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, 
self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched __lowercase : List[str] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) @is_flaky() def _lowerCamelCase ( self ) -> int: # Initialize image_processing __lowercase : int = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __lowercase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , numpify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , np.ndarray ) # Test not batched input __lowercase : Union[str, Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched __lowercase : List[Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) @is_flaky() def _lowerCamelCase ( self ) -> List[Any]: # Initialize image_processing __lowercase : Any = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __lowercase : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase_ , torchify=UpperCamelCase_ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase_ , torch.Tensor ) # Test not batched input __lowercase : Tuple = 
image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , ) # Test batched __lowercase : List[Any] = image_processing(UpperCamelCase_ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.size['''height'''], self.image_processor_tester.size['''width'''], ) , )
76
"""simple docstring""" a_ = { 'Pillow': 'Pillow<10.0.0', 'accelerate': 'accelerate>=0.20.3', 'av': 'av==9.2.0', 'beautifulsoup4': 'beautifulsoup4', 'black': 'black~=23.1', 'codecarbon': 'codecarbon==1.2.0', 'cookiecutter': 'cookiecutter==1.7.3', 'dataclasses': 'dataclasses', 'datasets': 'datasets!=2.5.0', 'decord': 'decord==0.6.0', 'deepspeed': 'deepspeed>=0.9.3', 'diffusers': 'diffusers', 'dill': 'dill<0.3.5', 'evaluate': 'evaluate>=0.2.0', 'fairscale': 'fairscale>0.3', 'faiss-cpu': 'faiss-cpu', 'fastapi': 'fastapi', 'filelock': 'filelock', 'flax': 'flax>=0.4.1,<=0.7.0', 'ftfy': 'ftfy', 'fugashi': 'fugashi>=1.0', 'GitPython': 'GitPython<3.1.19', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0', 'importlib_metadata': 'importlib_metadata', 'ipadic': 'ipadic>=1.0.0,<2.0', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13', 'jaxlib': 'jaxlib>=0.1.65,<=0.4.13', 'jieba': 'jieba', 'kenlm': 'kenlm', 'keras-nlp': 'keras-nlp>=0.3.1', 'librosa': 'librosa', 'nltk': 'nltk', 'natten': 'natten>=0.14.6', 'numpy': 'numpy>=1.17', 'onnxconverter-common': 'onnxconverter-common', 'onnxruntime-tools': 'onnxruntime-tools>=1.4.2', 'onnxruntime': 'onnxruntime>=1.4.0', 'opencv-python': 'opencv-python', 'optuna': 'optuna', 'optax': 'optax>=0.0.8,<=0.1.4', 'packaging': 'packaging>=20.0', 'parameterized': 'parameterized', 'phonemizer': 'phonemizer', 'protobuf': 'protobuf', 'psutil': 'psutil', 'pyyaml': 'pyyaml>=5.1', 'pydantic': 'pydantic<2', 'pytest': 'pytest>=7.2.0', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'python': 'python>=3.8.0', 'ray[tune]': 'ray[tune]', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'rhoknp': 'rhoknp>=1.1.0,<1.3.1', 'rjieba': 'rjieba', 'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1', 'ruff': 'ruff>=0.0.241,<=0.0.259', 'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0', 'sacremoses': 'sacremoses', 'safetensors': 'safetensors>=0.3.1', 'sagemaker': 'sagemaker>=2.31.0', 'scikit-learn': 
'scikit-learn', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'sigopt': 'sigopt', 'starlette': 'starlette', 'sudachipy': 'sudachipy>=0.6.6', 'sudachidict_core': 'sudachidict_core>=20220729', 'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14', 'tensorflow': 'tensorflow>=2.6,<2.14', 'tensorflow-text': 'tensorflow-text<2.14', 'tf2onnx': 'tf2onnx', 'timeout-decorator': 'timeout-decorator', 'timm': 'timm', 'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14', 'torch': 'torch>=1.9,!=1.12.0', 'torchaudio': 'torchaudio', 'torchvision': 'torchvision', 'pyctcdecode': 'pyctcdecode>=0.4.0', 'tqdm': 'tqdm>=4.27', 'unidic': 'unidic>=1.0.2', 'unidic_lite': 'unidic_lite>=1.0.7', 'urllib3': 'urllib3<2.0.0', 'uvicorn': 'uvicorn', }
76
1
"""simple docstring""" import argparse import pickle import numpy as np import torch from torch import nn from transformers import ReformerConfig, ReformerModelWithLMHead from transformers.utils import logging logging.set_verbosity_info() def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None ): # set parameter of one layer assert torch_layer.weight.shape == weight.shape, f"""{torch_layer} layer.weight does not match""" __lowercase : List[Any] = nn.Parameter(__UpperCamelCase ) if bias is not None: assert torch_layer.bias.shape == bias.shape, f"""{torch_layer} layer.bias does not match""" __lowercase : Optional[int] = nn.Parameter(__UpperCamelCase ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): # set torch weights for 1-to-1 comparison __lowercase : Union[str, Any] = np.asarray(weights[0] ) __lowercase : List[str] = np.asarray(weights[1] ) __lowercase : Tuple = np.asarray(weights[2] ) set_param( torch_layer.self_attention.query_key , torch.tensor(__UpperCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , __UpperCamelCase ) , ) set_param( torch_layer.self_attention.value , torch.tensor(__UpperCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , __UpperCamelCase ) , ) set_param( torch_layer.output.dense , torch.tensor(__UpperCamelCase ).view(-1 , __UpperCamelCase ).contiguous().transpose(0 , 1 ) , ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): # set torch weights for 1-to-1 comparison __lowercase : Optional[Any] = np.asarray(weights[0] ) __lowercase : Dict = np.asarray(weights[1] ) __lowercase : Union[str, Any] = np.asarray(weights[2] ) __lowercase : str = np.asarray(weights[3] ) set_param( torch_layer.self_attention.query , torch.tensor(__UpperCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , __UpperCamelCase ) , ) set_param( torch_layer.self_attention.key , torch.tensor(__UpperCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , __UpperCamelCase ) , ) set_param( 
torch_layer.self_attention.value , torch.tensor(__UpperCamelCase ).transpose(1 , 2 ).contiguous().view(-1 , __UpperCamelCase ) , ) set_param( torch_layer.output.dense , torch.tensor(__UpperCamelCase ).view(-1 , __UpperCamelCase ).contiguous().transpose(0 , 1 ) , ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): # layernorm 1 __lowercase : Tuple = weights[0][0][0] __lowercase : Optional[Any] = np.asarray(layer_norm_a[0] ) __lowercase : Union[str, Any] = np.asarray(layer_norm_a[1] ) set_param( torch_block.attention.layer_norm , torch.tensor(__UpperCamelCase ) , torch.tensor(__UpperCamelCase ) , ) # lsh weights + output __lowercase : Tuple = weights[0][1] if len(__UpperCamelCase ) < 4: set_layer_weights_in_torch_lsh(__UpperCamelCase , torch_block.attention , __UpperCamelCase ) else: set_layer_weights_in_torch_local(__UpperCamelCase , torch_block.attention , __UpperCamelCase ) # intermediate weighs __lowercase : Any = weights[2][0][1][2] # Chunked Feed Forward if len(__UpperCamelCase ) == 4: __lowercase : List[Any] = intermediate_weights[2] # layernorm 2 __lowercase : Tuple = np.asarray(intermediate_weights[0][0] ) __lowercase : str = np.asarray(intermediate_weights[0][1] ) set_param( torch_block.feed_forward.layer_norm , torch.tensor(__UpperCamelCase ) , torch.tensor(__UpperCamelCase ) , ) # intermediate dense __lowercase : Optional[Any] = np.asarray(intermediate_weights[1][0] ) __lowercase : Tuple = np.asarray(intermediate_weights[1][1] ) set_param( torch_block.feed_forward.dense.dense , torch.tensor(__UpperCamelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(__UpperCamelCase ) , ) # intermediate out __lowercase : Any = np.asarray(intermediate_weights[4][0] ) __lowercase : List[Any] = np.asarray(intermediate_weights[4][1] ) set_param( torch_block.feed_forward.output.dense , torch.tensor(__UpperCamelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(__UpperCamelCase ) , ) def __UpperCAmelCase ( __UpperCamelCase , 
__UpperCamelCase , __UpperCamelCase ): # reformer model __lowercase : str = torch_model.reformer # word embeds __lowercase : Optional[int] = np.asarray(weights[1] ) set_param( torch_model_reformer.embeddings.word_embeddings , torch.tensor(__UpperCamelCase ) , ) if isinstance(weights[3] , __UpperCamelCase ): __lowercase : Union[str, Any] = torch_model_reformer.embeddings.position_embeddings for emb_idx in range(len(position_embeddings.weights ) ): __lowercase : List[str] = np.asarray(weights[3][emb_idx][0] ) assert ( position_embeddings.weights[emb_idx].shape == emb_weights.shape ), f"""{position_embeddings[emb_idx]} emb does not match""" __lowercase : int = nn.Parameter(torch.tensor(__UpperCamelCase ) ) __lowercase : List[str] = weights[5] assert len(torch_model_reformer.encoder.layers ) * 4 == len( __UpperCamelCase ), "HF and trax model do not have the same number of layers" for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ): __lowercase : Union[str, Any] = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)] set_block_weights_in_torch(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # output layer norm __lowercase : Union[str, Any] = np.asarray(weights[7][0] ) __lowercase : Optional[Any] = np.asarray(weights[7][1] ) set_param( torch_model_reformer.encoder.layer_norm , torch.tensor(__UpperCamelCase ) , torch.tensor(__UpperCamelCase ) , ) # output embeddings __lowercase : int = np.asarray(weights[9][0] ) __lowercase : List[str] = np.asarray(weights[9][1] ) set_param( torch_model.lm_head.decoder , torch.tensor(__UpperCamelCase ).transpose(0 , 1 ).contiguous() , torch.tensor(__UpperCamelCase ) , ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): # Initialise PyTorch model __lowercase : Tuple = ReformerConfig.from_json_file(__UpperCamelCase ) print(f"""Building PyTorch model from configuration: {config}""" ) __lowercase : Tuple = ReformerModelWithLMHead(__UpperCamelCase ) with open(__UpperCamelCase , 
'''rb''' ) as f: __lowercase : int = pickle.load(__UpperCamelCase )['''weights'''] set_model_weights_in_torch(__UpperCamelCase , __UpperCamelCase , config.hidden_size ) # Save pytorch-model print(f"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , __UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--trax_model_pkl_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.' ) parser.add_argument( '--config_file', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained Reformer model. \n' 'This specifies the model architecture.' ), ) parser.add_argument( '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.' ) a_ = parser.parse_args() convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
76
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class UpperCAmelCase_ ( snake_case ): UpperCamelCase ="openai/whisper-base" UpperCamelCase =( "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the " "transcribed text." ) UpperCamelCase ="transcriber" UpperCamelCase =WhisperProcessor UpperCamelCase =WhisperForConditionalGeneration UpperCamelCase =["audio"] UpperCamelCase =["text"] def _lowerCamelCase ( self , UpperCamelCase_ ) -> Union[str, Any]: return self.pre_processor(UpperCamelCase_ , return_tensors='''pt''' ).input_features def _lowerCamelCase ( self , UpperCamelCase_ ) -> Optional[Any]: return self.model.generate(inputs=UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[str]: return self.pre_processor.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )[0]
76
1
"""simple docstring""" from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
76
"""simple docstring""" import gc import threading import time import psutil import torch class UpperCAmelCase_ : def __init__( self ) -> str: __lowercase : List[Any] = psutil.Process() __lowercase : Any = False def _lowerCamelCase ( self ) -> Union[str, Any]: __lowercase : Optional[Any] = -1 while True: __lowercase : List[str] = max(self.process.memory_info().rss , self.cpu_memory_peak ) # can't sleep or will not catch the peak right (this comment is here on purpose) if not self.peak_monitoring: break def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : List[Any] = True __lowercase : List[Any] = threading.Thread(target=self.peak_monitor ) __lowercase : Optional[int] = True self.thread.start() def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : Union[str, Any] = False self.thread.join() return self.cpu_memory_peak a_ = PeakCPUMemory() def __UpperCAmelCase ( ): # Time __lowercase : Union[str, Any] = {'''time''': time.time()} gc.collect() torch.cuda.empty_cache() # CPU mem __lowercase : List[Any] = psutil.Process().memory_info().rss cpu_peak_tracker.start() # GPU mem for i in range(torch.cuda.device_count() ): __lowercase : List[str] = torch.cuda.memory_allocated(__UpperCamelCase ) torch.cuda.reset_peak_memory_stats() return measures def __UpperCAmelCase ( __UpperCamelCase ): # Time __lowercase : List[Any] = {'''time''': time.time() - start_measures['''time''']} gc.collect() torch.cuda.empty_cache() # CPU mem __lowercase : Union[str, Any] = (psutil.Process().memory_info().rss - start_measures['''cpu''']) / 2**20 __lowercase : Dict = (cpu_peak_tracker.stop() - start_measures['''cpu''']) / 2**20 # GPU mem for i in range(torch.cuda.device_count() ): __lowercase : str = (torch.cuda.memory_allocated(__UpperCamelCase ) - start_measures[str(__UpperCamelCase )]) / 2**20 __lowercase : Optional[int] = (torch.cuda.max_memory_allocated(__UpperCamelCase ) - start_measures[str(__UpperCamelCase )]) / 2**20 return measures def __UpperCAmelCase ( __UpperCamelCase , 
__UpperCamelCase ): print(f"""{description}:""" ) print(f"""- Time: {measures["time"]:.2f}s""" ) for i in range(torch.cuda.device_count() ): print(f"""- GPU {i} allocated: {measures[str(__UpperCamelCase )]:.2f}MiB""" ) __lowercase : Dict = measures[f"""{i}-peak"""] print(f"""- GPU {i} peak: {peak:.2f}MiB""" ) print(f"""- CPU RAM allocated: {measures["cpu"]:.2f}MiB""" ) print(f"""- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB""" )
76
1
"""simple docstring""" from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def __UpperCAmelCase ( ): __lowercase : Union[str, Any] = HfArgumentParser(__UpperCamelCase ) __lowercase : int = parser.parse_args_into_dataclasses()[0] __lowercase : int = TensorFlowBenchmark(args=__UpperCamelCase ) try: __lowercase : str = parser.parse_args_into_dataclasses()[0] except ValueError as e: __lowercase : Optional[int] = '''Arg --no_{0} is no longer used, please use --no-{0} instead.''' __lowercase : Any = ''' '''.join(str(__UpperCamelCase ).split(''' ''' )[:-1] ) __lowercase : int = '''''' __lowercase : List[str] = eval(str(__UpperCamelCase ).split(''' ''' )[-1] ) __lowercase : Any = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(__UpperCamelCase ) if len(__UpperCamelCase ) > 0: __lowercase : Union[str, Any] = full_error_msg + begin_error_msg + str(__UpperCamelCase ) raise ValueError(__UpperCamelCase ) benchmark.run() if __name__ == "__main__": main()
76
"""simple docstring""" import numpy as np import datasets a_ = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n' a_ = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n' a_ = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): def _lowerCamelCase ( self ) -> List[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ), } ) , ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> Tuple: # convert to numpy arrays __lowercase : Dict = np.array(UpperCamelCase_ ) __lowercase : str = np.array(UpperCamelCase_ ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError('''Expected `X` to be 
a 2D vector''' ) if len(reference_distribution.shape ) != 2: raise ValueError('''Expected `reference_distribution` to be a 2D vector''' ) if reference_distribution.shape[0] < 2: raise ValueError( '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' ) # Get mahalanobis distance for each prediction __lowercase : Tuple = X - np.mean(UpperCamelCase_ ) __lowercase : List[Any] = np.cov(reference_distribution.T ) try: __lowercase : Tuple = np.linalg.inv(UpperCamelCase_ ) except np.linalg.LinAlgError: __lowercase : str = np.linalg.pinv(UpperCamelCase_ ) __lowercase : Any = np.dot(UpperCamelCase_ , UpperCamelCase_ ) __lowercase : Optional[Any] = np.dot(UpperCamelCase_ , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
76
1
"""simple docstring""" from pathlib import Path import numpy as np from PIL import Image def __UpperCAmelCase ( __UpperCamelCase ): __lowercase ,__lowercase ,__lowercase : List[str] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2_989 * r + 0.5_870 * g + 0.1_140 * b def __UpperCAmelCase ( __UpperCamelCase ): return (gray > 1_27) & (gray <= 2_55) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): __lowercase : Union[str, Any] = np.zeros_like(__UpperCamelCase ) __lowercase : List[Any] = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image __lowercase : List[Any] = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): __lowercase : Any = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() __lowercase : Dict = int(summation > 0 ) return output if __name__ == "__main__": # read original image a_ = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg' a_ = np.array(Image.open(lena_path)) # kernel to be applied a_ = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) a_ = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image a_ = Image.fromarray(output).convert('RGB') pil_img.save('result_dilation.png')
76
"""simple docstring""" a_ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def __UpperCAmelCase ( __UpperCamelCase ): # Make sure the supplied data is a bytes-like object if not isinstance(__UpperCamelCase , __UpperCamelCase ): __lowercase : str = f"""a bytes-like object is required, not '{data.__class__.__name__}'""" raise TypeError(__UpperCamelCase ) __lowercase : Any = ''''''.join(bin(__UpperCamelCase )[2:].zfill(8 ) for byte in data ) __lowercase : List[str] = len(__UpperCamelCase ) % 6 != 0 if padding_needed: # The padding that will be added later __lowercase : int = B'''=''' * ((6 - len(__UpperCamelCase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(__UpperCamelCase ) % 6) else: __lowercase : Any = B'''''' # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(__UpperCamelCase ) , 6 ) ).encode() + padding ) def __UpperCAmelCase ( __UpperCamelCase ): # Make sure encoded_data is either a string or a bytes-like object if not isinstance(__UpperCamelCase , __UpperCamelCase ) and not isinstance(__UpperCamelCase , __UpperCamelCase ): __lowercase : List[str] = ( '''argument should be a bytes-like object or ASCII string, ''' f"""not '{encoded_data.__class__.__name__}'""" ) raise TypeError(__UpperCamelCase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(__UpperCamelCase , __UpperCamelCase ): try: __lowercase : List[str] = encoded_data.decode('''utf-8''' ) except UnicodeDecodeError: raise ValueError('''base64 encoded data should only contain ASCII characters''' ) __lowercase : Dict = encoded_data.count('''=''' ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in 
encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(__UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one __lowercase : Tuple = encoded_data[:-padding] __lowercase : str = ''''''.join( bin(B64_CHARSET.index(__UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: __lowercase : Any = ''''''.join( bin(B64_CHARSET.index(__UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data ) __lowercase : int = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(__UpperCamelCase ) , 8 ) ] return bytes(__UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
76
1
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 6_50, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "pytorch", "script": "run_ddp.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 6_00, "eval_accuracy": 0.7, "eval_loss": 0.6}, }, { "framework": "tensorflow", "script": "run_tf_dist.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.p3.16xlarge", "results": {"train_runtime": 6_00, "eval_accuracy": 0.6, "eval_loss": 0.7}, }, ] ) class UpperCAmelCase_ ( unittest.TestCase ): def _lowerCamelCase ( self ) -> Optional[Any]: if self.framework == "pytorch": subprocess.run( F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=UpperCamelCase_ , ) assert hasattr(self , '''env''' ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> Union[str, Any]: __lowercase : str = F"""{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}""" # distributed data settings __lowercase : List[str] = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None # creates estimator return HuggingFace( entry_point=self.script , 
source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=UpperCamelCase_ , instance_count=UpperCamelCase_ , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase_ , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=UpperCamelCase_ , py_version='''py36''' , ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[Any]: TrainingJobAnalytics(UpperCamelCase_ ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" ) @parameterized.expand([(2,)] ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> Any: # create estimator __lowercase : List[str] = self.create_estimator(UpperCamelCase_ ) # run training estimator.fit() # result dataframe __lowercase : Dict = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis __lowercase : int = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) __lowercase : Optional[Any] = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping __lowercase : int = ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_99_99 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # dump tests result into json file to share in PR with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , UpperCamelCase_ )
76
"""simple docstring""" import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', } a_ = { 'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'}, 'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'}, } a_ = { 'ctrl': 2_5_6, } a_ = { 'Pregnancy': 1_6_8_6_2_9, 'Christianity': 7_6_7_5, 'Explain': 1_0_6_4_2_3, 'Fitness': 6_3_4_4_0, 'Saving': 6_3_1_6_3, 'Ask': 2_7_1_7_1, 'Ass': 9_5_9_8_5, 'Joke': 1_6_3_5_0_9, 'Questions': 4_5_6_2_2, 'Thoughts': 4_9_6_0_5, 'Retail': 5_2_3_4_2, 'Feminism': 1_6_4_3_3_8, 'Writing': 1_1_9_9_2, 'Atheism': 1_9_2_2_6_3, 'Netflix': 4_8_6_1_6, 'Computing': 3_9_6_3_9, 'Opinion': 4_3_2_1_3, 'Alone': 4_4_9_6_7, 'Funny': 5_8_9_1_7, 'Gaming': 4_0_3_5_8, 'Human': 4_0_8_8, 'India': 1_3_3_1, 'Joker': 7_7_1_3_8, 'Diet': 3_6_2_0_6, 'Legal': 1_1_8_5_9, 'Norman': 4_9_3_9, 'Tip': 7_2_6_8_9, 'Weight': 5_2_3_4_3, 'Movies': 4_6_2_7_3, 'Running': 2_3_4_2_5, 'Science': 2_0_9_0, 'Horror': 3_7_7_9_3, 'Confession': 6_0_5_7_2, 'Finance': 1_2_2_5_0, 'Politics': 1_6_3_6_0, 'Scary': 1_9_1_9_8_5, 'Support': 1_2_6_5_4, 'Technologies': 3_2_5_1_6, 'Teenage': 6_6_1_6_0, 'Event': 3_2_7_6_9, 'Learned': 6_7_4_6_0, 'Notion': 1_8_2_7_7_0, 'Wikipedia': 3_7_5_8_3, 'Books': 6_6_6_5, 'Extract': 7_6_0_5_0, 'Confessions': 1_0_2_7_0_1, 'Conspiracy': 7_5_9_3_2, 'Links': 6_3_6_7_4, 'Narcissus': 1_5_0_4_2_5, 'Relationship': 5_4_7_6_6, 'Relationships': 1_3_4_7_9_6, 'Reviews': 4_1_6_7_1, 'News': 4_2_5_6, 'Translation': 2_6_8_2_0, 'multilingual': 1_2_8_4_0_6, } def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Any = set() __lowercase : Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowercase : Any = char __lowercase : List[Any] = set(__UpperCamelCase ) return pairs class 
UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =CONTROL_CODES def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="<unk>" , **UpperCamelCase_ ) -> int: super().__init__(unk_token=UpperCamelCase_ , **UpperCamelCase_ ) with open(UpperCamelCase_ , encoding='''utf-8''' ) as vocab_handle: __lowercase : List[Any] = json.load(UpperCamelCase_ ) __lowercase : Any = {v: k for k, v in self.encoder.items()} with open(UpperCamelCase_ , encoding='''utf-8''' ) as merges_handle: __lowercase : Optional[Any] = merges_handle.read().split('''\n''' )[1:-1] __lowercase : Optional[Any] = [tuple(merge.split() ) for merge in merges] __lowercase : Optional[int] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) __lowercase : Optional[Any] = {} @property def _lowerCamelCase ( self ) -> Union[str, Any]: return len(self.encoder ) def _lowerCamelCase ( self ) -> Tuple: return dict(self.encoder , **self.added_tokens_encoder ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> str: if token in self.cache: return self.cache[token] __lowercase : str = tuple(UpperCamelCase_ ) __lowercase : str = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) __lowercase : Optional[Any] = get_pairs(UpperCamelCase_ ) if not pairs: return token while True: __lowercase : Dict = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __lowercase ,__lowercase : Tuple = bigram __lowercase : int = [] __lowercase : Union[str, Any] = 0 while i < len(UpperCamelCase_ ): try: __lowercase : Optional[int] = word.index(UpperCamelCase_ , UpperCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __lowercase : Tuple = j if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i 
+= 2 else: new_word.append(word[i] ) i += 1 __lowercase : List[str] = tuple(UpperCamelCase_ ) __lowercase : str = new_word if len(UpperCamelCase_ ) == 1: break else: __lowercase : List[str] = get_pairs(UpperCamelCase_ ) __lowercase : Optional[Any] = '''@@ '''.join(UpperCamelCase_ ) __lowercase : Dict = word[:-4] __lowercase : str = word return word def _lowerCamelCase ( self , UpperCamelCase_ ) -> str: __lowercase : List[Any] = [] __lowercase : int = re.findall(R'''\S+\n?''' , UpperCamelCase_ ) for token in words: split_tokens.extend(list(self.bpe(UpperCamelCase_ ).split(''' ''' ) ) ) return split_tokens def _lowerCamelCase ( self , UpperCamelCase_ ) -> Optional[Any]: return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> int: return self.decoder.get(UpperCamelCase_ , self.unk_token ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> Optional[int]: __lowercase : Tuple = ''' '''.join(UpperCamelCase_ ).replace('''@@ ''' , '''''' ).strip() return out_string def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> Tuple[str]: if not os.path.isdir(UpperCamelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowercase : Optional[Any] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __lowercase : Optional[int] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + '''\n''' ) __lowercase : List[str] = 0 with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ): 
if index != token_index: logger.warning( F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''' ) __lowercase : Union[str, Any] = token_index writer.write(''' '''.join(UpperCamelCase_ ) + '''\n''' ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
76
1
"""simple docstring""" a_ = tuple[float, float, float] a_ = tuple[float, float, float] def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): __lowercase : List[str] = end_pointa[0] - end_pointa[0] __lowercase : str = end_pointa[1] - end_pointa[1] __lowercase : str = end_pointa[2] - end_pointa[2] return (x, y, z) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): __lowercase : Tuple = ab[1] * ac[2] - ab[2] * ac[1] # *i __lowercase : Any = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j __lowercase : int = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): return tuple(round(__UpperCamelCase , __UpperCamelCase ) for x in vector ) == (0, 0, 0) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 10 ): __lowercase : Dict = create_vector(__UpperCamelCase , __UpperCamelCase ) __lowercase : Optional[int] = create_vector(__UpperCamelCase , __UpperCamelCase ) return is_zero_vector(get_ad_vectors_cross(__UpperCamelCase , __UpperCamelCase ) , __UpperCamelCase )
76
"""simple docstring""" import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor a_ = logging.get_logger(__name__) class UpperCAmelCase_ ( snake_case ): def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> None: warnings.warn( '''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use LayoutLMv2ImageProcessor instead.''' , UpperCamelCase_ , ) super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
76
1
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'google/vit-base-patch16-224': 'https://huggingface.co/vit-base-patch16-224/resolve/main/config.json', # See all ViT models at https://huggingface.co/models?filter=vit } class UpperCAmelCase_ ( snake_case ): UpperCamelCase ="vit" def __init__( self , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=12 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=2_24 , UpperCamelCase_=16 , UpperCamelCase_=3 , UpperCamelCase_=True , UpperCamelCase_=16 , **UpperCamelCase_ , ) -> Any: super().__init__(**UpperCamelCase_ ) __lowercase : Tuple = hidden_size __lowercase : List[str] = num_hidden_layers __lowercase : List[Any] = num_attention_heads __lowercase : List[str] = intermediate_size __lowercase : str = hidden_act __lowercase : Optional[Any] = hidden_dropout_prob __lowercase : Any = attention_probs_dropout_prob __lowercase : Optional[Any] = initializer_range __lowercase : int = layer_norm_eps __lowercase : Optional[int] = image_size __lowercase : List[str] = patch_size __lowercase : Union[str, Any] = num_channels __lowercase : str = qkv_bias __lowercase : int = encoder_stride class UpperCAmelCase_ ( snake_case ): UpperCamelCase =version.parse("1.11" ) @property def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}), ] ) @property def _lowerCamelCase ( self ) -> float: return 1E-4
76
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging a_ = logging.get_logger(__name__) a_ = '▁' a_ = {'vocab_file': 'sentencepiece.bpe.model'} a_ = { 'vocab_file': { 'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model', 'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model', 'xlm-roberta-large-finetuned-conll02-dutch': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll02-spanish': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll03-english': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll03-german': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model' ), } } a_ = { 'xlm-roberta-base': 5_1_2, 'xlm-roberta-large': 5_1_2, 'xlm-roberta-large-finetuned-conll02-dutch': 5_1_2, 'xlm-roberta-large-finetuned-conll02-spanish': 5_1_2, 'xlm-roberta-large-finetuned-conll03-english': 5_1_2, 'xlm-roberta-large-finetuned-conll03-german': 5_1_2, } class UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =["input_ids", "attention_mask"] def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> None: # Mask token behave like a normal word, i.e. 
include the space before it __lowercase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token __lowercase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) __lowercase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCamelCase_ ) ) __lowercase : str = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __lowercase : List[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __lowercase : Tuple = 1 __lowercase : Any = len(self.sp_model ) + self.fairseq_offset __lowercase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Optional[Any]: __lowercase : int = self.__dict__.copy() __lowercase : int = None __lowercase : Optional[Any] = self.sp_model.serialized_model_proto() return state def __setstate__( self , UpperCamelCase_ ) -> Tuple: __lowercase : List[str] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __lowercase : str = {} __lowercase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowercase : Dict = [self.cls_token_id] __lowercase : Union[str, Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> List[int]: __lowercase : Optional[Any] = [self.sep_token_id] __lowercase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return 
len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowerCamelCase ( self ) -> Dict: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def _lowerCamelCase ( self ) -> str: __lowercase : List[str] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[str]: return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> str: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __lowercase : Optional[Any] = self.sp_model.PieceToId(UpperCamelCase_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _lowerCamelCase ( self , UpperCamelCase_ ) -> Tuple: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> Dict: __lowercase : Tuple = ''''''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ''' ''' ).strip() return out_string def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> Tuple[str]: if not os.path.isdir(UpperCamelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowercase : List[Any] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase_ , '''wb''' ) as fi: __lowercase : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_ ) return (out_vocab_file,)
76
1
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class UpperCAmelCase_ ( unittest.TestCase ): def _lowerCamelCase ( self ) -> str: __lowercase : Dict = tempfile.mkdtemp() __lowercase : Optional[Any] = [ '''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''的''', '''价''', '''格''', '''是''', '''15''', '''便''', '''alex''', '''##andra''', ''',''', '''。''', '''-''', '''t''', '''shirt''', ] __lowercase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer: vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) ) __lowercase : str = { '''do_resize''': True, '''size''': {'''height''': 2_24, '''width''': 2_24}, '''do_center_crop''': True, '''crop_size''': {'''height''': 18, '''width''': 18}, '''do_normalize''': True, '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], '''do_convert_rgb''': True, } __lowercase : Union[str, Any] = os.path.join(self.tmpdirname , UpperCamelCase_ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(UpperCamelCase_ , UpperCamelCase_ ) def _lowerCamelCase ( self , **UpperCamelCase_ ) -> List[str]: return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def _lowerCamelCase ( self , **UpperCamelCase_ ) -> List[Any]: return BertTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def 
_lowerCamelCase ( self , **UpperCamelCase_ ) -> str: return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def _lowerCamelCase ( self ) -> int: shutil.rmtree(self.tmpdirname ) def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : Tuple = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowercase : Dict = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowerCamelCase ( self ) -> str: __lowercase : Tuple = self.get_tokenizer() __lowercase : int = self.get_rust_tokenizer() __lowercase : Union[str, Any] = self.get_image_processor() __lowercase : List[Any] = ChineseCLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) processor_slow.save_pretrained(self.tmpdirname ) __lowercase : int = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase_ ) __lowercase : Tuple = ChineseCLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) processor_fast.save_pretrained(self.tmpdirname ) __lowercase : List[Any] = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase_ ) self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase_ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , UpperCamelCase_ ) self.assertIsInstance(processor_fast.image_processor , UpperCamelCase_ ) def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : Dict = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , 
image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowercase : Tuple = self.get_tokenizer(cls_token='''(CLS)''' , sep_token='''(SEP)''' ) __lowercase : str = self.get_image_processor(do_normalize=UpperCamelCase_ ) __lowercase : Union[str, Any] = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token='''(CLS)''' , sep_token='''(SEP)''' , do_normalize=UpperCamelCase_ ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCamelCase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase_ ) def _lowerCamelCase ( self ) -> Dict: __lowercase : Dict = self.get_image_processor() __lowercase : str = self.get_tokenizer() __lowercase : Optional[int] = ChineseCLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) __lowercase : Optional[Any] = self.prepare_image_inputs() __lowercase : Union[str, Any] = image_processor(UpperCamelCase_ , return_tensors='''np''' ) __lowercase : Dict = processor(images=UpperCamelCase_ , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _lowerCamelCase ( self ) -> int: __lowercase : Dict = self.get_image_processor() __lowercase : Optional[int] = self.get_tokenizer() __lowercase : List[str] = ChineseCLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) __lowercase : Any = '''Alexandra,T-shirt的价格是15便士。''' __lowercase : Optional[int] = processor(text=UpperCamelCase_ ) __lowercase : Optional[int] = tokenizer(UpperCamelCase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _lowerCamelCase ( self ) -> List[Any]: __lowercase : str = self.get_image_processor() __lowercase : Dict = self.get_tokenizer() 
__lowercase : Any = ChineseCLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) __lowercase : str = '''Alexandra,T-shirt的价格是15便士。''' __lowercase : Optional[Any] = self.prepare_image_inputs() __lowercase : str = processor(text=UpperCamelCase_ , images=UpperCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase_ ): processor() def _lowerCamelCase ( self ) -> Any: __lowercase : int = self.get_image_processor() __lowercase : Optional[Any] = self.get_tokenizer() __lowercase : str = ChineseCLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) __lowercase : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowercase : List[str] = processor.batch_decode(UpperCamelCase_ ) __lowercase : Optional[int] = tokenizer.batch_decode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ ) def _lowerCamelCase ( self ) -> int: __lowercase : int = self.get_image_processor() __lowercase : Dict = self.get_tokenizer() __lowercase : Any = ChineseCLIPProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) __lowercase : List[str] = '''Alexandra,T-shirt的价格是15便士。''' __lowercase : Any = self.prepare_image_inputs() __lowercase : int = processor(text=UpperCamelCase_ , images=UpperCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
76
"""simple docstring""" import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput a_ = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class UpperCAmelCase_ ( snake_case ): def __init__( self , *UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ ) -> Tuple: super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) __lowercase : Union[str, Any] = eval_examples __lowercase : Union[str, Any] = post_process_function __lowercase : Any = quant_trainer_args __lowercase : Optional[Any] = 1_28 # default number of calibration samples def _lowerCamelCase ( self , UpperCamelCase_=None ) -> Any: if calib_dataset is None and self.calib_dataset is None: raise ValueError('''Trainer: calibration requires an calib_dataset.''' ) __lowercase : Tuple = calib_dataset if calib_dataset is not None else self.calib_dataset __lowercase : str = self._remove_unused_columns(UpperCamelCase_ , description='''Calibration''' ) return DataLoader( UpperCamelCase_ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=UpperCamelCase_ , ) def _lowerCamelCase ( self , UpperCamelCase_=None ) -> Any: __lowercase : Optional[int] = self.train_dataset if calib_dataset is None else calib_dataset __lowercase : List[Any] = self.get_calib_dataloader(UpperCamelCase_ ) __lowercase : Dict = self.model quant_trainer.configure_model(UpperCamelCase_ , self.quant_trainer_args , calib=UpperCamelCase_ ) model.eval() quant_trainer.enable_calibration(UpperCamelCase_ ) logger.info('''***** Running calibration *****''' ) logger.info(F""" Num examples = 
{self.calib_num}""" ) logger.info(F""" Batch size = {calib_dataloader.batch_size}""" ) for step, inputs in enumerate(UpperCamelCase_ ): # Prediction step __lowercase ,__lowercase ,__lowercase : Optional[Any] = self.prediction_step(UpperCamelCase_ , UpperCamelCase_ , prediction_loss_only=UpperCamelCase_ ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(UpperCamelCase_ , self.quant_trainer_args ) __lowercase : Tuple = model def _lowerCamelCase ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_ = "eval" ) -> str: __lowercase : Tuple = self.eval_dataset if eval_dataset is None else eval_dataset __lowercase : Union[str, Any] = self.get_eval_dataloader(UpperCamelCase_ ) __lowercase : str = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. __lowercase : Optional[int] = self.compute_metrics __lowercase : Dict = None __lowercase : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __lowercase : Tuple = eval_loop( UpperCamelCase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , ) finally: __lowercase : List[str] = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: __lowercase : int = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , output.predictions ) __lowercase : Optional[int] = self.compute_metrics(UpperCamelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): __lowercase : List[str] = metrics.pop(UpperCamelCase_ ) self.log(UpperCamelCase_ ) else: __lowercase : Dict = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report() ) __lowercase : int = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase_ ) return metrics def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_ = "test" ) -> List[Any]: __lowercase : Optional[int] = self.get_test_dataloader(UpperCamelCase_ ) # Temporarily disable metric computation, we will do it in the loop here. __lowercase : str = self.compute_metrics __lowercase : Dict = None __lowercase : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __lowercase : Union[str, Any] = eval_loop( UpperCamelCase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , ) finally: __lowercase : Any = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output __lowercase : Dict = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , output.predictions , '''predict''' ) __lowercase : Optional[int] = self.compute_metrics(UpperCamelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): __lowercase : List[str] = metrics.pop(UpperCamelCase_ ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_="./" ) -> int: __lowercase : Optional[int] = self.eval_dataset __lowercase : Optional[int] = self.get_eval_dataloader(UpperCamelCase_ ) __lowercase : Any = next(iter(UpperCamelCase_ ) ) # saving device - to make it consistent __lowercase : Any = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) # convert to tuple __lowercase : Tuple = tuple(v.to(UpperCamelCase_ ) for k, v in batch.items() ) logger.info('''Converting model to be onnx compatible''' ) from pytorch_quantization.nn 
import TensorQuantizer __lowercase : List[Any] = True __lowercase : int = self.model.to(UpperCamelCase_ ) model.eval() model.float() __lowercase : Optional[int] = model.module if hasattr(UpperCamelCase_ , '''module''' ) else model quant_trainer.configure_model(UpperCamelCase_ , self.quant_trainer_args ) __lowercase : Tuple = os.path.join(UpperCamelCase_ , '''model.onnx''' ) logger.info(F"""exporting model to {output_model_file}""" ) __lowercase : Tuple = {0: '''batch_size''', 1: '''seq_len'''} torch.onnx.export( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , export_params=UpperCamelCase_ , opset_version=13 , do_constant_folding=UpperCamelCase_ , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={ '''input_ids''': axes, '''attention_mask''': axes, '''token_type_ids''': axes, '''output_start_logits''': axes, '''output_end_logits''': axes, } , verbose=UpperCamelCase_ , ) logger.info('''onnx export finished''' )
76
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) a_ = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
"""simple docstring""" import math import flax.linen as nn import jax.numpy as jnp def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 , __UpperCamelCase = 1 , __UpperCamelCase = 1.0e4 , __UpperCamelCase = False , __UpperCamelCase = 1.0 , ): assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, f"""Embedding dimension {embedding_dim} should be even""" __lowercase : Dict = float(embedding_dim // 2 ) __lowercase : Tuple = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) __lowercase : List[Any] = min_timescale * jnp.exp(jnp.arange(__UpperCamelCase , dtype=jnp.floataa ) * -log_timescale_increment ) __lowercase : Any = jnp.expand_dims(__UpperCamelCase , 1 ) * jnp.expand_dims(__UpperCamelCase , 0 ) # scale embeddings __lowercase : Optional[int] = scale * emb if flip_sin_to_cos: __lowercase : Any = jnp.concatenate([jnp.cos(__UpperCamelCase ), jnp.sin(__UpperCamelCase )] , axis=1 ) else: __lowercase : List[str] = jnp.concatenate([jnp.sin(__UpperCamelCase ), jnp.cos(__UpperCamelCase )] , axis=1 ) __lowercase : int = jnp.reshape(__UpperCamelCase , [jnp.shape(__UpperCamelCase )[0], embedding_dim] ) return signal class UpperCAmelCase_ ( nn.Module ): UpperCamelCase =32 UpperCamelCase =jnp.floataa @nn.compact def __call__( self , UpperCamelCase_ ) -> Optional[int]: __lowercase : Union[str, Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(UpperCamelCase_ ) __lowercase : str = nn.silu(UpperCamelCase_ ) __lowercase : Dict = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(UpperCamelCase_ ) return temb class UpperCAmelCase_ ( nn.Module ): UpperCamelCase =32 UpperCamelCase =False UpperCamelCase =1 @nn.compact def __call__( self , UpperCamelCase_ ) -> Optional[int]: return get_sinusoidal_embeddings( UpperCamelCase_ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
76
1
"""simple docstring""" import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument a_ = { '/attention/': '/0/SelfAttention/', '/self_attention/': '/0/SelfAttention/', '/encoder_decoder_attention/': '/1/EncDecAttention/', 'value': 'v', 'query': 'q', 'key': 'k', 'out': 'o', 'pre_self_attention_layer_norm': '0/layer_norm', 'pre_cross_attention_layer_norm': '1/layer_norm', 'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong 'token_embedder': 'shared', 'encoder_norm': 'final_layer_norm', 'decoder_norm': 'final_layer_norm', 'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight', 'router/router_weights/w/': 'router/classifier/', 'roer/roer_weights/w/': 'router/classifier/', 'logits_dense': 'lm_head', } def __UpperCAmelCase ( __UpperCamelCase ): # 1. in HF T5, we have block.{x}.layer.{y}. 
which corresponds to layer.{x} in # the original model __lowercase : List[Any] = list(s_dict.keys() ) for key in keys: __lowercase : Optional[int] = R'''.*/layers_(\d+)''' __lowercase : Tuple = key if re.match(__UpperCamelCase , __UpperCamelCase ): __lowercase : Optional[Any] = re.sub(R'''layers_(\d+)''' , R'''block/\1/layer''' , __UpperCamelCase ) __lowercase : str = R'''(encoder|decoder)\/''' if re.match(__UpperCamelCase , __UpperCamelCase ): __lowercase : Union[str, Any] = re.match(__UpperCamelCase , __UpperCamelCase ).groups() if groups[0] == "encoder": __lowercase : Dict = re.sub(R'''/mlp/''' , R'''/1/mlp/''' , __UpperCamelCase ) __lowercase : Union[str, Any] = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/1/layer_norm/''' , __UpperCamelCase ) elif groups[0] == "decoder": __lowercase : Optional[Any] = re.sub(R'''/mlp/''' , R'''/2/mlp/''' , __UpperCamelCase ) __lowercase : Dict = re.sub(R'''/pre_mlp_layer_norm/''' , R'''/2/layer_norm/''' , __UpperCamelCase ) # 2. Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: __lowercase : Optional[Any] = new_key.replace(__UpperCamelCase , __UpperCamelCase ) print(f"""{key} -> {new_key}""" ) __lowercase : Union[str, Any] = s_dict.pop(__UpperCamelCase ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: __lowercase : Optional[Any] = s_dict[ '''encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight''' ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: __lowercase : List[str] = s_dict[ '''decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight''' ].T # 3. 
Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: __lowercase : List[str] = s_dict[key].shape[0] __lowercase : str = s_dict[key] for idx in range(__UpperCamelCase ): __lowercase : str = expert_weihts[idx] print(f"""{key} -> {key.replace("expert/" , "nested fstring" )}""" ) s_dict.pop(__UpperCamelCase ) return s_dict a_ = { 'NUM_ENCODER_LAYERS': 'num_layers', 'NUM_DECODER_LAYERS': 'num_decoder_layers', 'NUM_HEADS': 'num_heads', 'HEAD_DIM': 'd_kv', 'EMBED_DIM': 'd_model', 'MLP_DIM': 'd_ff', 'NUM_SELECTED_EXPERTS': 'num_selected_experts', 'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers', 'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers', 'dense.MlpBlock.activations': 'feed_forward_proj', } def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): # Convert a google style config to the hugging face fromat import regex as re with open(__UpperCamelCase , '''r''' ) as f: __lowercase : Dict = f.read() __lowercase : Tuple = re.findall(R'''(.*) = ([0-9.]*)''' , __UpperCamelCase ) __lowercase : Union[str, Any] = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": __lowercase : Tuple = float(__UpperCamelCase ) if '''.''' in value else int(__UpperCamelCase ) __lowercase : Any = re.findall(R'''(.*activations) = \(\'(.*)\',\)''' , __UpperCamelCase )[0] __lowercase : Optional[int] = str(activation[1] ) __lowercase : int = num_experts __lowercase : Optional[int] = SwitchTransformersConfig(**__UpperCamelCase ) return config def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase="./" , __UpperCamelCase=8 ): # Initialise PyTorch model print(f"""Loading flax weights from : {flax_checkpoint_path}""" ) __lowercase : Optional[int] = checkpoints.load_tax_checkpoint(__UpperCamelCase ) if gin_file is not None: __lowercase : Union[str, Any] = convert_gin_to_config(__UpperCamelCase , __UpperCamelCase ) else: __lowercase : List[Any] = 
SwitchTransformersConfig.from_pretrained(__UpperCamelCase ) __lowercase : Optional[Any] = SwitchTransformersForConditionalGeneration(__UpperCamelCase ) __lowercase : int = flax_params['''target'''] __lowercase : List[Any] = flatten_dict(__UpperCamelCase , sep='''/''' ) __lowercase : int = rename_keys(__UpperCamelCase ) __lowercase : Any = unflatten_dict(__UpperCamelCase , sep='''/''' ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(__UpperCamelCase , __UpperCamelCase ) print(f"""Save PyTorch model to {pytorch_dump_path}""" ) pt_model.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--switch_t5x_checkpoint_path', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the' ' model architecture. If not provided, a `gin_file` has to be provided.' ), ) parser.add_argument( '--gin_file', default=None, type=str, required=False, help='Path to the gin config file. If not provided, a `config_file` has to be passed ', ) parser.add_argument( '--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.' ) parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts') a_ = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_tax_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
76
"""simple docstring""" import os import sys a_ = os.path.join(os.path.dirname(__file__), 'src') sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) a_ = [ 'torch', 'numpy', 'tokenizers', 'filelock', 'requests', 'tqdm', 'regex', 'sentencepiece', 'sacremoses', 'importlib_metadata', 'huggingface_hub', ] @add_start_docstrings(AutoConfig.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoConfig.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoTokenizer.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoTokenizer.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoModel.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoModel.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoModelForCausalLM.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoModelForMaskedLM.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoModelForSequenceClassification.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoModelForQuestionAnswering.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
76
1
"""simple docstring""" from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging a_ = logging.get_logger(__name__) class UpperCAmelCase_ ( snake_case ): UpperCamelCase =["audio_values", "audio_mask"] def __init__( self , UpperCamelCase_=20_48 , UpperCamelCase_=1 , UpperCamelCase_=[16, 16] , UpperCamelCase_=1_28 , UpperCamelCase_=4_41_00 , UpperCamelCase_=86 , UpperCamelCase_=20_48 , UpperCamelCase_=0.0 , **UpperCamelCase_ , ) -> Dict: super().__init__( feature_size=UpperCamelCase_ , sampling_rate=UpperCamelCase_ , padding_value=UpperCamelCase_ , **UpperCamelCase_ , ) __lowercase : str = spectrogram_length __lowercase : int = num_channels __lowercase : List[str] = patch_size __lowercase : Any = feature_size // self.patch_size[1] __lowercase : int = n_fft __lowercase : Optional[int] = sampling_rate // hop_length_to_sampling_rate __lowercase : int = sampling_rate __lowercase : Optional[Any] = padding_value __lowercase : List[Any] = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=UpperCamelCase_ , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=UpperCamelCase_ , norm='''slaney''' , mel_scale='''slaney''' , ).T def _lowerCamelCase ( self , UpperCamelCase_ ) -> np.ndarray: __lowercase : List[str] = spectrogram( UpperCamelCase_ , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='''dB''' , db_range=8_0.0 , ) __lowercase : int = log_spec[:, :-1] __lowercase : Dict = log_spec - 2_0.0 __lowercase : List[str] = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = False , 
UpperCamelCase_ = False , **UpperCamelCase_ , ) -> BatchFeature: if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( '''This feature extractor is set to support sampling rate''' F""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled""" F""" with {self.sampling_rate} and not {sampling_rate}.""" ) else: logger.warning( '''It is strongly recommended to pass the `sampling_rate` argument to this function. ''' '''Failing to do so can result in silent errors that might be hard to debug.''' ) __lowercase : Any = isinstance(UpperCamelCase_ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F"""Only mono-channel audio is supported for input to {self}""" ) __lowercase : Tuple = is_batched_numpy or ( isinstance(UpperCamelCase_ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __lowercase : Union[str, Any] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(UpperCamelCase_ , np.ndarray ): __lowercase : List[str] = np.asarray(UpperCamelCase_ , dtype=np.floataa ) elif isinstance(UpperCamelCase_ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __lowercase : int = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __lowercase : Any = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis __lowercase : Optional[int] = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , UpperCamelCase_ ): __lowercase : List[Any] = [np.asarray(UpperCamelCase_ , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask __lowercase : Optional[int] = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number 
of audio patches in a batch if return_attention_mask: __lowercase : str = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] __lowercase : Tuple = np.array(UpperCamelCase_ ).astype(np.floataa ) # convert into correct format for padding __lowercase : List[str] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch __lowercase : Optional[Any] = np.ones([len(UpperCamelCase_ ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) __lowercase : int = padded_audio_features * self.padding_value for i in range(len(UpperCamelCase_ ) ): __lowercase : Tuple = audio_features[i] __lowercase : int = feature # return as BatchFeature if return_attention_mask: __lowercase : List[str] = {'''audio_values''': padded_audio_features, '''audio_mask''': audio_mask} else: __lowercase : Union[str, Any] = {'''audio_values''': padded_audio_features} __lowercase : Any = BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ ) return encoded_inputs
76
"""simple docstring""" from math import pi, sqrt, tan def __UpperCAmelCase ( __UpperCamelCase ): if side_length < 0: raise ValueError('''surface_area_cube() only accepts non-negative values''' ) return 6 * side_length**2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if length < 0 or breadth < 0 or height < 0: raise ValueError('''surface_area_cuboid() only accepts non-negative values''' ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def __UpperCAmelCase ( __UpperCamelCase ): if radius < 0: raise ValueError('''surface_area_sphere() only accepts non-negative values''' ) return 4 * pi * radius**2 def __UpperCAmelCase ( __UpperCamelCase ): if radius < 0: raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' ) return 3 * pi * radius**2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if radius < 0 or height < 0: raise ValueError('''surface_area_cone() only accepts non-negative values''' ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( '''surface_area_conical_frustum() only accepts non-negative values''' ) __lowercase : List[str] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if radius < 0 or height < 0: raise ValueError('''surface_area_cylinder() only accepts non-negative values''' ) return 2 * pi * radius * (height + radius) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if torus_radius < 0 or tube_radius < 0: raise ValueError('''surface_area_torus() only accepts non-negative values''' ) if torus_radius < tube_radius: raise ValueError( '''surface_area_torus() does not support spindle or self intersecting tori''' ) return 4 * pow(__UpperCamelCase , 2 ) 
* torus_radius * tube_radius def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if length < 0 or width < 0: raise ValueError('''area_rectangle() only accepts non-negative values''' ) return length * width def __UpperCAmelCase ( __UpperCamelCase ): if side_length < 0: raise ValueError('''area_square() only accepts non-negative values''' ) return side_length**2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if base < 0 or height < 0: raise ValueError('''area_triangle() only accepts non-negative values''' ) return (base * height) / 2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError('''Given three sides do not form a triangle''' ) __lowercase : int = (sidea + sidea + sidea) / 2 __lowercase : List[Any] = sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if base < 0 or height < 0: raise ValueError('''area_parallelogram() only accepts non-negative values''' ) return base * height def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if basea < 0 or basea < 0 or height < 0: raise ValueError('''area_trapezium() only accepts non-negative values''' ) return 1 / 2 * (basea + basea) * height def __UpperCAmelCase ( __UpperCamelCase ): if radius < 0: raise ValueError('''area_circle() only accepts non-negative values''' ) return pi * radius**2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if radius_x < 0 or radius_y < 0: raise ValueError('''area_ellipse() only accepts non-negative values''' ) return pi * radius_x * radius_y def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if diagonal_a < 0 or diagonal_a < 0: raise 
ValueError('''area_rhombus() only accepts non-negative values''' ) return 1 / 2 * diagonal_a * diagonal_a def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if not isinstance(__UpperCamelCase , __UpperCamelCase ) or sides < 3: raise ValueError( '''area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides''' ) elif length < 0: raise ValueError( '''area_reg_polygon() only accepts non-negative values as \ length of a side''' ) return (sides * length**2) / (4 * tan(pi / sides )) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print('[DEMO] Areas of various geometric shapes: \n') print(F"Rectangle: {area_rectangle(1_0, 2_0) = }") print(F"Square: {area_square(1_0) = }") print(F"Triangle: {area_triangle(1_0, 1_0) = }") print(F"Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }") print(F"Parallelogram: {area_parallelogram(1_0, 2_0) = }") print(F"Rhombus: {area_rhombus(1_0, 2_0) = }") print(F"Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }") print(F"Circle: {area_circle(2_0) = }") print(F"Ellipse: {area_ellipse(1_0, 2_0) = }") print('\nSurface Areas of various geometric shapes: \n') print(F"Cube: {surface_area_cube(2_0) = }") print(F"Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }") print(F"Sphere: {surface_area_sphere(2_0) = }") print(F"Hemisphere: {surface_area_hemisphere(2_0) = }") print(F"Cone: {surface_area_cone(1_0, 2_0) = }") print(F"Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }") print(F"Cylinder: {surface_area_cylinder(1_0, 2_0) = }") print(F"Torus: {surface_area_torus(2_0, 1_0) = }") print(F"Equilateral Triangle: {area_reg_polygon(3, 1_0) = }") print(F"Square: {area_reg_polygon(4, 1_0) = }") print(F"Reqular Pentagon: {area_reg_polygon(5, 1_0) = }")
76
1
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging a_ = logging.get_logger(__name__) if is_vision_available(): import PIL class UpperCAmelCase_ ( snake_case ): UpperCamelCase =["pixel_values"] def __init__( self , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = PILImageResampling.BICUBIC , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = 1 / 2_55 , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = True , **UpperCamelCase_ , ) -> None: super().__init__(**UpperCamelCase_ ) __lowercase : str = size if size is not None else {'''shortest_edge''': 2_24} __lowercase : Tuple = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) __lowercase : int = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} __lowercase : Optional[int] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ , param_name='''crop_size''' ) __lowercase : int = do_resize __lowercase : Any = size __lowercase : int = resample __lowercase : List[Any] = do_center_crop __lowercase : Dict = crop_size __lowercase : Optional[int] = do_rescale __lowercase : str = rescale_factor __lowercase : int = do_normalize __lowercase : str = image_mean if image_mean is not None else OPENAI_CLIP_MEAN __lowercase : List[str] = image_std if image_std is not None else OPENAI_CLIP_STD __lowercase : Optional[int] = do_convert_rgb def _lowerCamelCase ( self , UpperCamelCase_ 
, UpperCamelCase_ , UpperCamelCase_ = PILImageResampling.BICUBIC , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray: __lowercase : List[str] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) __lowercase : Union[str, Any] = get_resize_output_image_size(UpperCamelCase_ , size=size['''shortest_edge'''] , default_to_square=UpperCamelCase_ ) return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray: __lowercase : Union[str, Any] = get_size_dict(UpperCamelCase_ ) if "height" not in size or "width" not in size: raise ValueError(F"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" ) return center_crop(UpperCamelCase_ , size=(size['''height'''], size['''width''']) , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> Union[str, Any]: return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray: return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = 
ChannelDimension.FIRST , **UpperCamelCase_ , ) -> PIL.Image.Image: __lowercase : Any = do_resize if do_resize is not None else self.do_resize __lowercase : int = size if size is not None else self.size __lowercase : Dict = get_size_dict(UpperCamelCase_ , param_name='''size''' , default_to_square=UpperCamelCase_ ) __lowercase : Dict = resample if resample is not None else self.resample __lowercase : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop __lowercase : str = crop_size if crop_size is not None else self.crop_size __lowercase : List[str] = get_size_dict(UpperCamelCase_ , param_name='''crop_size''' , default_to_square=UpperCamelCase_ ) __lowercase : Tuple = do_rescale if do_rescale is not None else self.do_rescale __lowercase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor __lowercase : Optional[Any] = do_normalize if do_normalize is not None else self.do_normalize __lowercase : Any = image_mean if image_mean is not None else self.image_mean __lowercase : Dict = image_std if image_std is not None else self.image_std __lowercase : List[Any] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __lowercase : int = make_list_of_images(UpperCamelCase_ ) if not valid_images(UpperCamelCase_ ): raise ValueError( '''Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # PIL RGBA images are converted to RGB if do_convert_rgb: __lowercase : List[Any] = [convert_to_rgb(UpperCamelCase_ ) for image in images] # All transformations expect numpy arrays. __lowercase : str = [to_numpy_array(UpperCamelCase_ ) for image in images] if do_resize: __lowercase : str = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images] if do_center_crop: __lowercase : List[str] = [self.center_crop(image=UpperCamelCase_ , size=UpperCamelCase_ ) for image in images] if do_rescale: __lowercase : List[Any] = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images] if do_normalize: __lowercase : Union[str, Any] = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images] __lowercase : Optional[Any] = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images] __lowercase : List[Any] = {'''pixel_values''': images} return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
76
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): # noqa: E741 while r - l > 1: __lowercase : int = (l + r) // 2 if v[m] >= key: __lowercase : Any = m else: __lowercase : List[Any] = m # noqa: E741 return r def __UpperCAmelCase ( __UpperCamelCase ): if len(__UpperCamelCase ) == 0: return 0 __lowercase : List[str] = [0] * len(__UpperCamelCase ) __lowercase : Any = 1 __lowercase : Dict = v[0] for i in range(1 , len(__UpperCamelCase ) ): if v[i] < tail[0]: __lowercase : Tuple = v[i] elif v[i] > tail[length - 1]: __lowercase : Optional[Any] = v[i] length += 1 else: __lowercase : Dict = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
76
1
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class UpperCAmelCase_ ( unittest.TestCase ): def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : Optional[int] = tempfile.mkdtemp() # fmt: off __lowercase : List[str] = ['''''', '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on __lowercase : Tuple = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) __lowercase : Optional[int] = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] __lowercase : List[str] = {'''unk_token''': '''<unk>'''} __lowercase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) __lowercase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(UpperCamelCase_ ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(UpperCamelCase_ ) ) __lowercase : str = { '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } __lowercase : List[Any] = os.path.join(self.tmpdirname , 
UpperCamelCase_ ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(UpperCamelCase_ , UpperCamelCase_ ) def _lowerCamelCase ( self , **UpperCamelCase_ ) -> Any: return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='''!''' , **UpperCamelCase_ ) def _lowerCamelCase ( self , **UpperCamelCase_ ) -> Tuple: return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='''!''' , **UpperCamelCase_ ) def _lowerCamelCase ( self , **UpperCamelCase_ ) -> str: return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase_ ) def _lowerCamelCase ( self ) -> List[str]: shutil.rmtree(self.tmpdirname ) def _lowerCamelCase ( self ) -> int: __lowercase : List[str] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] __lowercase : Dict = [Image.fromarray(np.moveaxis(UpperCamelCase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowerCamelCase ( self ) -> Tuple: __lowercase : Optional[Any] = self.get_tokenizer() __lowercase : int = self.get_rust_tokenizer() __lowercase : Optional[int] = self.get_image_processor() __lowercase : int = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) processor_slow.save_pretrained(self.tmpdirname ) __lowercase : int = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase_ ) __lowercase : Tuple = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) processor_fast.save_pretrained(self.tmpdirname ) __lowercase : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase_ ) self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase_ ) 
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , UpperCamelCase_ ) self.assertIsInstance(processor_fast.image_processor , UpperCamelCase_ ) def _lowerCamelCase ( self ) -> Any: __lowercase : List[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __lowercase : Optional[int] = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) __lowercase : Any = self.get_image_processor(do_normalize=UpperCamelCase_ ) __lowercase : Dict = OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCamelCase_ ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCamelCase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase_ ) def _lowerCamelCase ( self ) -> Union[str, Any]: __lowercase : List[str] = self.get_image_processor() __lowercase : Any = self.get_tokenizer() __lowercase : List[str] = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) __lowercase : Dict = self.prepare_image_inputs() __lowercase : List[Any] = image_processor(UpperCamelCase_ , return_tensors='''np''' ) __lowercase : Optional[Any] = processor(images=UpperCamelCase_ , return_tensors='''np''' ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _lowerCamelCase ( self ) -> str: __lowercase : Dict = self.get_image_processor() __lowercase : Optional[int] = self.get_tokenizer() __lowercase : List[str] = 
OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) __lowercase : Optional[int] = '''lower newer''' __lowercase : Any = processor(text=UpperCamelCase_ , return_tensors='''np''' ) __lowercase : Optional[Any] = tokenizer(UpperCamelCase_ , return_tensors='''np''' ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() ) def _lowerCamelCase ( self ) -> Union[str, Any]: __lowercase : int = self.get_image_processor() __lowercase : Tuple = self.get_tokenizer() __lowercase : Optional[int] = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) __lowercase : str = '''lower newer''' __lowercase : Tuple = self.prepare_image_inputs() __lowercase : Optional[Any] = processor(text=UpperCamelCase_ , images=UpperCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase_ ): processor() def _lowerCamelCase ( self ) -> Tuple: __lowercase : int = '''google/owlvit-base-patch32''' __lowercase : Optional[Any] = OwlViTProcessor.from_pretrained(UpperCamelCase_ ) __lowercase : Tuple = ['''cat''', '''nasa badge'''] __lowercase : Tuple = processor(text=UpperCamelCase_ ) __lowercase : Union[str, Any] = 16 self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase_ ): processor() def _lowerCamelCase ( self ) -> Any: __lowercase : Tuple = '''google/owlvit-base-patch32''' __lowercase : int = OwlViTProcessor.from_pretrained(UpperCamelCase_ ) __lowercase : int = [['''cat''', '''nasa badge'''], ['''person''']] __lowercase : Tuple = processor(text=UpperCamelCase_ ) __lowercase : Tuple = 16 __lowercase : Any = len(UpperCamelCase_ ) __lowercase : Optional[Any] = max([len(UpperCamelCase_ 
) for texts in input_texts] ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (batch_size * num_max_text_queries, seq_length) ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase_ ): processor() def _lowerCamelCase ( self ) -> Union[str, Any]: __lowercase : List[Any] = '''google/owlvit-base-patch32''' __lowercase : Tuple = OwlViTProcessor.from_pretrained(UpperCamelCase_ ) __lowercase : str = ['''cat''', '''nasa badge'''] __lowercase : Optional[Any] = processor(text=UpperCamelCase_ ) __lowercase : Tuple = 16 __lowercase : int = inputs['''input_ids'''] __lowercase : List[Any] = [ [4_94_06, 23_68, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_94_06, 68_41, 1_13_01, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask'''] ) self.assertEqual(inputs['''input_ids'''].shape , (2, seq_length) ) self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] ) self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] ) def _lowerCamelCase ( self ) -> Dict: __lowercase : Optional[int] = self.get_image_processor() __lowercase : List[Any] = self.get_tokenizer() __lowercase : Any = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ ) __lowercase : Union[str, Any] = self.prepare_image_inputs() __lowercase : List[Any] = self.prepare_image_inputs() __lowercase : Tuple = processor(images=UpperCamelCase_ , query_images=UpperCamelCase_ ) self.assertListEqual(list(inputs.keys() ) , ['''query_pixel_values''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase_ ): processor() def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : Optional[int] = self.get_image_processor() __lowercase : Union[str, Any] = self.get_tokenizer() __lowercase : Tuple = OwlViTProcessor(tokenizer=UpperCamelCase_ , image_processor=UpperCamelCase_ 
) __lowercase : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __lowercase : Any = processor.batch_decode(UpperCamelCase_ ) __lowercase : List[Any] = tokenizer.batch_decode(UpperCamelCase_ ) self.assertListEqual(UpperCamelCase_ , UpperCamelCase_ )
76
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( __UpperCamelCase = 4 ): __lowercase : Dict = abs(__UpperCamelCase ) or 4 return [[1 + x + y * row_size for x in range(__UpperCamelCase )] for y in range(__UpperCamelCase )] def __UpperCAmelCase ( __UpperCamelCase ): return reverse_row(transpose(__UpperCamelCase ) ) # OR.. transpose(reverse_column(matrix)) def __UpperCAmelCase ( __UpperCamelCase ): return reverse_row(reverse_column(__UpperCamelCase ) ) # OR.. reverse_column(reverse_row(matrix)) def __UpperCAmelCase ( __UpperCamelCase ): return reverse_column(transpose(__UpperCamelCase ) ) # OR.. transpose(reverse_row(matrix)) def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Dict = [list(__UpperCamelCase ) for x in zip(*__UpperCamelCase )] return matrix def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Union[str, Any] = matrix[::-1] return matrix def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Dict = [x[::-1] for x in matrix] return matrix def __UpperCAmelCase ( __UpperCamelCase ): for i in matrix: print(*__UpperCamelCase ) if __name__ == "__main__": a_ = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 90 counterclockwise:\n') print_matrix(rotate_aa(matrix)) a_ = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 180:\n') print_matrix(rotate_aaa(matrix)) a_ = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 270 counterclockwise:\n') print_matrix(rotate_aaa(matrix))
76
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices a_ = logging.get_logger(__name__) a_ = { 'shi-labs/dinat-mini-in1k-224': 'https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json', # See all Dinat models at https://huggingface.co/models?filter=dinat } class UpperCAmelCase_ ( snake_case , snake_case ): UpperCamelCase ="dinat" UpperCamelCase ={ "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers", } def __init__( self , UpperCamelCase_=4 , UpperCamelCase_=3 , UpperCamelCase_=64 , UpperCamelCase_=[3, 4, 6, 5] , UpperCamelCase_=[2, 4, 8, 16] , UpperCamelCase_=7 , UpperCamelCase_=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , UpperCamelCase_=3.0 , UpperCamelCase_=True , UpperCamelCase_=0.0 , UpperCamelCase_=0.0 , UpperCamelCase_=0.1 , UpperCamelCase_="gelu" , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-5 , UpperCamelCase_=0.0 , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ , ) -> Optional[int]: super().__init__(**UpperCamelCase_ ) __lowercase : List[Any] = patch_size __lowercase : List[str] = num_channels __lowercase : Union[str, Any] = embed_dim __lowercase : Tuple = depths __lowercase : Optional[Any] = len(UpperCamelCase_ ) __lowercase : List[str] = num_heads __lowercase : Union[str, Any] = kernel_size __lowercase : Optional[Any] = dilations __lowercase : Union[str, Any] = mlp_ratio __lowercase : Union[str, Any] = qkv_bias __lowercase : Any = hidden_dropout_prob __lowercase : Optional[Any] = attention_probs_dropout_prob __lowercase : Optional[int] = drop_path_rate __lowercase : List[Any] = hidden_act __lowercase : Dict = layer_norm_eps __lowercase : Dict = initializer_range # we set the hidden_size attribute in order to make Dinat work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model 
__lowercase : Optional[Any] = int(embed_dim * 2 ** (len(UpperCamelCase_ ) - 1) ) __lowercase : Any = layer_scale_init_value __lowercase : Tuple = ['''stem'''] + [F"""stage{idx}""" for idx in range(1 , len(UpperCamelCase_ ) + 1 )] __lowercase ,__lowercase : List[str] = get_aligned_output_features_output_indices( out_features=UpperCamelCase_ , out_indices=UpperCamelCase_ , stage_names=self.stage_names )
76
"""simple docstring""" import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer a_ = logging.get_logger(__name__) a_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} a_ = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } a_ = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } a_ = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } a_ = { 'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2, 'facebook/dpr-ctx_encoder-multiset-base': 5_1_2, } a_ = { 'facebook/dpr-question_encoder-single-nq-base': 5_1_2, 'facebook/dpr-question_encoder-multiset-base': 5_1_2, } a_ = { 'facebook/dpr-reader-single-nq-base': 5_1_2, 'facebook/dpr-reader-multiset-base': 5_1_2, } a_ = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } a_ = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } a_ = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION a_ = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text'] ) a_ = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) a_ = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the 
tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. 
This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(snake_case ) class UpperCAmelCase_ : def __call__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> BatchEncoding: if titles is None and texts is None: return super().__call__( UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) elif titles is None or texts is None: __lowercase : int = titles if texts is None else texts return super().__call__( UpperCamelCase_ , UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) __lowercase : Optional[int] = titles if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [titles] __lowercase : Optional[int] = texts if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [texts] __lowercase : str = len(UpperCamelCase_ ) __lowercase : List[Any] = questions if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [questions] * n_passages if len(UpperCamelCase_ ) != len(UpperCamelCase_ ): raise ValueError( F"""There should be as many titles than texts but got {len(UpperCamelCase_ )} titles and {len(UpperCamelCase_ )} texts.""" ) __lowercase : int = super().__call__(UpperCamelCase_ , UpperCamelCase_ , 
padding=UpperCamelCase_ , truncation=UpperCamelCase_ )['''input_ids'''] __lowercase : List[Any] = super().__call__(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ )['''input_ids'''] __lowercase : Optional[Any] = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(UpperCamelCase_ , UpperCamelCase_ ) ] } if return_attention_mask is not False: __lowercase : str = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __lowercase : List[str] = attention_mask return self.pad(UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 16 , UpperCamelCase_ = 64 , UpperCamelCase_ = 4 , ) -> List[DPRSpanPrediction]: __lowercase : List[Any] = reader_input['''input_ids'''] __lowercase ,__lowercase ,__lowercase : List[str] = reader_output[:3] __lowercase : Optional[int] = len(UpperCamelCase_ ) __lowercase : Any = sorted(range(UpperCamelCase_ ) , reverse=UpperCamelCase_ , key=relevance_logits.__getitem__ ) __lowercase : List[DPRReaderOutput] = [] for doc_id in sorted_docs: __lowercase : Any = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __lowercase : Tuple = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __lowercase : Optional[Any] = sequence_ids.index(self.pad_token_id ) else: __lowercase : List[Any] = len(UpperCamelCase_ ) __lowercase : List[str] = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCamelCase_ , 
top_spans=UpperCamelCase_ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCamelCase_ , start_index=UpperCamelCase_ , end_index=UpperCamelCase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(UpperCamelCase_ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> List[DPRSpanPrediction]: __lowercase : Tuple = [] for start_index, start_score in enumerate(UpperCamelCase_ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __lowercase : int = sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : x[1] , reverse=UpperCamelCase_ ) __lowercase : Optional[Any] = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" ) __lowercase : Any = end_index - start_index + 1 if length > max_answer_length: raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(UpperCamelCase_ ) == top_spans: break return chosen_span_intervals @add_end_docstrings(snake_case ) class UpperCAmelCase_ ( snake_case , snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =READER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase 
=READER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase =["input_ids", "attention_mask"]
76
1
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType a_ = logging.get_logger(__name__) a_ = { 'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json', 'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json', 'microsoft/deberta-v2-xlarge-mnli': ( 'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json' ), 'microsoft/deberta-v2-xxlarge-mnli': ( 'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json' ), } class UpperCAmelCase_ ( snake_case ): UpperCamelCase ="deberta-v2" def __init__( self , UpperCamelCase_=12_81_00 , UpperCamelCase_=15_36 , UpperCamelCase_=24 , UpperCamelCase_=24 , UpperCamelCase_=61_44 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=0 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-7 , UpperCamelCase_=False , UpperCamelCase_=-1 , UpperCamelCase_=0 , UpperCamelCase_=True , UpperCamelCase_=None , UpperCamelCase_=0 , UpperCamelCase_="gelu" , **UpperCamelCase_ , ) -> List[Any]: super().__init__(**UpperCamelCase_ ) __lowercase : List[Any] = hidden_size __lowercase : Tuple = num_hidden_layers __lowercase : Optional[int] = num_attention_heads __lowercase : Dict = intermediate_size __lowercase : List[str] = hidden_act __lowercase : str = hidden_dropout_prob __lowercase : List[Any] = attention_probs_dropout_prob __lowercase : Optional[Any] = max_position_embeddings __lowercase : Optional[int] = type_vocab_size __lowercase : Optional[int] = initializer_range __lowercase : Union[str, Any] = relative_attention __lowercase : Tuple = max_relative_positions __lowercase : int = 
pad_token_id __lowercase : Optional[Any] = position_biased_input # Backwards compatibility if type(UpperCamelCase_ ) == str: __lowercase : Union[str, Any] = [x.strip() for x in pos_att_type.lower().split('''|''' )] __lowercase : str = pos_att_type __lowercase : Any = vocab_size __lowercase : List[Any] = layer_norm_eps __lowercase : Tuple = kwargs.get('''pooler_hidden_size''' , UpperCamelCase_ ) __lowercase : Dict = pooler_dropout __lowercase : List[str] = pooler_hidden_act class UpperCAmelCase_ ( snake_case ): @property def _lowerCamelCase ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": __lowercase : Any = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __lowercase : Union[str, Any] = {0: '''batch''', 1: '''sequence'''} if self._config.type_vocab_size > 0: return OrderedDict( [('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis)] ) else: return OrderedDict([('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis)] ) @property def _lowerCamelCase ( self ) -> int: return 12 def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = -1 , UpperCamelCase_ = -1 , UpperCamelCase_ = -1 , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = 3 , UpperCamelCase_ = 40 , UpperCamelCase_ = 40 , UpperCamelCase_ = None , ) -> Mapping[str, Any]: __lowercase : Any = super().generate_dummy_inputs(preprocessor=UpperCamelCase_ , framework=UpperCamelCase_ ) if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs: del dummy_inputs["token_type_ids"] return dummy_inputs
76
"""simple docstring""" import warnings from ...utils import logging from .image_processing_glpn import GLPNImageProcessor a_ = logging.get_logger(__name__) class UpperCAmelCase_ ( snake_case ): def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> None: warnings.warn( '''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please''' ''' use GLPNImageProcessor instead.''' , UpperCamelCase_ , ) super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
76
1
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : str = 0 for ch in input_str: __lowercase : Union[str, Any] = ord(__UpperCamelCase ) __lowercase : Optional[int] = pow(2 , __UpperCamelCase ) # If we already turned on bit for current character's unicode if bitmap >> ch_unicode & 1 == 1: return False bitmap |= ch_bit_index_on return True if __name__ == "__main__": import doctest doctest.testmod()
76
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def __UpperCAmelCase ( __UpperCamelCase ): # encoder.embeddings are double copied in original FLAVA return sum(param.float().sum() if '''encoder.embeddings''' not in key else 0 for key, param in state_dict.items() ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): __lowercase : Any = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue __lowercase : Dict = key.replace('''heads.cmd.mim_head.cls.predictions''' , '''mmm_image_head''' ) __lowercase : Dict = key.replace('''heads.cmd.mlm_head.cls.predictions''' , '''mmm_text_head''' ) __lowercase : Dict = key.replace('''heads.cmd.itm_head.cls''' , '''itm_head''' ) __lowercase : Tuple = key.replace('''heads.cmd.itm_head.pooler''' , '''itm_head.pooler''' ) __lowercase : Dict = key.replace('''heads.cmd.clip_head.logit_scale''' , '''flava.logit_scale''' ) __lowercase : Optional[int] = key.replace('''heads.fairseq_mlm.cls.predictions''' , '''mlm_head''' ) __lowercase : Optional[int] = key.replace('''heads.imagenet.mim_head.cls.predictions''' , '''mim_head''' ) __lowercase : Union[str, Any] = key.replace('''mm_text_projection''' , '''flava.text_to_mm_projection''' ) __lowercase : str = key.replace('''mm_image_projection''' , '''flava.image_to_mm_projection''' ) __lowercase : Dict = key.replace('''image_encoder.module''' , '''flava.image_model''' ) __lowercase : str = key.replace('''text_encoder.module''' , '''flava.text_model''' ) __lowercase : Dict = key.replace('''mm_encoder.module.encoder.cls_token''' , '''flava.multimodal_model.cls_token''' ) __lowercase : Union[str, Any] = key.replace('''mm_encoder.module''' , '''flava.multimodal_model''' ) __lowercase : List[str] = key.replace('''text_projection''' , '''flava.text_projection''' 
) __lowercase : Any = key.replace('''image_projection''' , '''flava.image_projection''' ) __lowercase : Tuple = value.float() for key, value in codebook_state_dict.items(): __lowercase : int = value return upgrade @torch.no_grad() def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None ): if config_path is not None: __lowercase : Union[str, Any] = FlavaConfig.from_pretrained(__UpperCamelCase ) else: __lowercase : Union[str, Any] = FlavaConfig() __lowercase : Any = FlavaForPreTraining(__UpperCamelCase ).eval() __lowercase : Any = convert_dalle_checkpoint(__UpperCamelCase , __UpperCamelCase , save_checkpoint=__UpperCamelCase ) if os.path.exists(__UpperCamelCase ): __lowercase : Optional[Any] = torch.load(__UpperCamelCase , map_location='''cpu''' ) else: __lowercase : List[Any] = torch.hub.load_state_dict_from_url(__UpperCamelCase , map_location='''cpu''' ) __lowercase : Optional[int] = upgrade_state_dict(__UpperCamelCase , __UpperCamelCase ) hf_model.load_state_dict(__UpperCamelCase ) __lowercase : Union[str, Any] = hf_model.state_dict() __lowercase : Optional[Any] = count_parameters(__UpperCamelCase ) __lowercase : List[Any] = count_parameters(__UpperCamelCase ) + count_parameters(__UpperCamelCase ) assert torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) hf_model.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') a_ = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, 
args.config_path)
76
1
"""simple docstring""" from __future__ import annotations from typing import TypedDict class UpperCAmelCase_ ( snake_case ): UpperCamelCase =42 UpperCamelCase =42 def __UpperCAmelCase ( __UpperCamelCase ): if not isinstance(__UpperCamelCase , __UpperCamelCase ): raise TypeError('''The parameter s type must be str.''' ) return [s[i:] + s[:i] for i in range(len(__UpperCamelCase ) )] def __UpperCAmelCase ( __UpperCamelCase ): if not isinstance(__UpperCamelCase , __UpperCamelCase ): raise TypeError('''The parameter s type must be str.''' ) if not s: raise ValueError('''The parameter s must not be empty.''' ) __lowercase : Tuple = all_rotations(__UpperCamelCase ) rotations.sort() # sort the list of rotations in alphabetically order # make a string composed of the last char of each rotation __lowercase : BWTTransformDict = { "bwt_string": "".join([word[-1] for word in rotations] ), "idx_original_string": rotations.index(__UpperCamelCase ), } return response def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if not isinstance(__UpperCamelCase , __UpperCamelCase ): raise TypeError('''The parameter bwt_string type must be str.''' ) if not bwt_string: raise ValueError('''The parameter bwt_string must not be empty.''' ) try: __lowercase : str = int(__UpperCamelCase ) except ValueError: raise TypeError( '''The parameter idx_original_string type must be int or passive''' ''' of cast to int.''' ) if idx_original_string < 0: raise ValueError('''The parameter idx_original_string must not be lower than 0.''' ) if idx_original_string >= len(__UpperCamelCase ): raise ValueError( '''The parameter idx_original_string must be lower than''' ''' len(bwt_string).''' ) __lowercase : List[Any] = [''''''] * len(__UpperCamelCase ) for _ in range(len(__UpperCamelCase ) ): for i in range(len(__UpperCamelCase ) ): __lowercase : List[str] = bwt_string[i] + ordered_rotations[i] ordered_rotations.sort() return ordered_rotations[idx_original_string] if __name__ == "__main__": a_ = 'Provide 
a string that I will generate its BWT transform: ' a_ = input(entry_msg).strip() a_ = bwt_transform(s) print( F"Burrows Wheeler transform for string '{s}' results " F"in '{result['bwt_string']}'" ) a_ = reverse_bwt(result['bwt_string'], result['idx_original_string']) print( F"Reversing Burrows Wheeler transform for entry '{result['bwt_string']}' " F"we get original string '{original_string}'" )
76
"""simple docstring""" from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging a_ = logging.get_logger(__name__) class UpperCAmelCase_ ( snake_case ): UpperCamelCase =["pixel_values"] def __init__( self , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = PILImageResampling.BILINEAR , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = 1 / 2_55 , UpperCamelCase_ = True , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> None: super().__init__(**UpperCamelCase_ ) __lowercase : List[str] = size if size is not None else {'''shortest_edge''': 2_56} __lowercase : Dict = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) __lowercase : Optional[Any] = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24} __lowercase : Dict = get_size_dict(UpperCamelCase_ ) __lowercase : Dict = do_resize __lowercase : Optional[Any] = size __lowercase : List[Any] = resample __lowercase : Dict = do_center_crop __lowercase : Any = crop_size __lowercase : List[str] = do_rescale __lowercase : List[str] = rescale_factor __lowercase : Optional[Any] = do_normalize __lowercase : Any = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __lowercase : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = PILImageResampling.BICUBIC , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray: __lowercase : List[Any] = 
get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) if "shortest_edge" not in size: raise ValueError(F"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" ) __lowercase : List[Any] = get_resize_output_image_size(UpperCamelCase_ , size=size['''shortest_edge'''] , default_to_square=UpperCamelCase_ ) return resize(UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray: __lowercase : Union[str, Any] = get_size_dict(UpperCamelCase_ ) return center_crop(UpperCamelCase_ , size=(size['''height'''], size['''width''']) , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ ) -> np.ndarray: return rescale(UpperCamelCase_ , scale=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> np.ndarray: return normalize(UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ , data_format=UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = ChannelDimension.FIRST , **UpperCamelCase_ , ) -> Optional[Any]: __lowercase : Union[str, Any] = do_resize if do_resize is not None else self.do_resize __lowercase : Tuple = size if size is not None else self.size __lowercase : Optional[Any] = get_size_dict(UpperCamelCase_ , default_to_square=UpperCamelCase_ ) __lowercase : int = resample if 
resample is not None else self.resample __lowercase : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop __lowercase : List[str] = crop_size if crop_size is not None else self.crop_size __lowercase : List[str] = get_size_dict(UpperCamelCase_ ) __lowercase : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale __lowercase : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor __lowercase : Dict = do_normalize if do_normalize is not None else self.do_normalize __lowercase : Tuple = image_mean if image_mean is not None else self.image_mean __lowercase : Any = image_std if image_std is not None else self.image_std __lowercase : Any = make_list_of_images(UpperCamelCase_ ) if not valid_images(UpperCamelCase_ ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None: raise ValueError('''Size must be specified if do_resize is True.''' ) if do_center_crop and crop_size is None: raise ValueError('''Crop size must be specified if do_center_crop is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. 
__lowercase : Optional[int] = [to_numpy_array(UpperCamelCase_ ) for image in images] if do_resize: __lowercase : Tuple = [self.resize(image=UpperCamelCase_ , size=UpperCamelCase_ , resample=UpperCamelCase_ ) for image in images] if do_center_crop: __lowercase : Any = [self.center_crop(image=UpperCamelCase_ , size=UpperCamelCase_ ) for image in images] if do_rescale: __lowercase : str = [self.rescale(image=UpperCamelCase_ , scale=UpperCamelCase_ ) for image in images] if do_normalize: __lowercase : Optional[int] = [self.normalize(image=UpperCamelCase_ , mean=UpperCamelCase_ , std=UpperCamelCase_ ) for image in images] __lowercase : str = [to_channel_dimension_format(UpperCamelCase_ , UpperCamelCase_ ) for image in images] __lowercase : Optional[Any] = {'''pixel_values''': images} return BatchFeature(data=UpperCamelCase_ , tensor_type=UpperCamelCase_ )
76
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class UpperCAmelCase_ ( snake_case ): UpperCamelCase ="cvt" def __init__( self , UpperCamelCase_=3 , UpperCamelCase_=[7, 3, 3] , UpperCamelCase_=[4, 2, 2] , UpperCamelCase_=[2, 1, 1] , UpperCamelCase_=[64, 1_92, 3_84] , UpperCamelCase_=[1, 3, 6] , UpperCamelCase_=[1, 2, 10] , UpperCamelCase_=[4.0, 4.0, 4.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.0] , UpperCamelCase_=[0.0, 0.0, 0.1] , UpperCamelCase_=[True, True, True] , UpperCamelCase_=[False, False, True] , UpperCamelCase_=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase_=[3, 3, 3] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[2, 2, 2] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=[1, 1, 1] , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , **UpperCamelCase_ , ) -> Union[str, Any]: super().__init__(**UpperCamelCase_ ) __lowercase : List[Any] = num_channels __lowercase : List[Any] = patch_sizes __lowercase : List[str] = patch_stride __lowercase : int = patch_padding __lowercase : List[Any] = embed_dim __lowercase : Any = num_heads __lowercase : List[str] = depth __lowercase : Tuple = mlp_ratio __lowercase : Union[str, Any] = attention_drop_rate __lowercase : List[str] = drop_rate __lowercase : Optional[Any] = drop_path_rate __lowercase : List[str] = qkv_bias __lowercase : Any = cls_token __lowercase : Dict = qkv_projection_method __lowercase : Dict = kernel_qkv __lowercase : Union[str, Any] = padding_kv __lowercase : List[str] = stride_kv __lowercase : List[str] = padding_q __lowercase : Any = stride_q __lowercase : int = initializer_range __lowercase : Dict = layer_norm_eps
76
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if digit_amount > 0: return round(number - int(__UpperCamelCase ) , __UpperCamelCase ) return number - int(__UpperCamelCase ) if __name__ == "__main__": print(decimal_isolate(1.53, 0)) print(decimal_isolate(35.345, 1)) print(decimal_isolate(35.345, 2)) print(decimal_isolate(35.345, 3)) print(decimal_isolate(-14.789, 3)) print(decimal_isolate(0, 2)) print(decimal_isolate(-14.123, 1)) print(decimal_isolate(-14.123, 2)) print(decimal_isolate(-14.123, 3))
76
1
"""simple docstring""" from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) a_ = logging.get_logger(__name__) # pylint: disable=invalid-name a_ = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... 
).images\n >>> image[0].save("cat.png")\n ```\n' def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=8 ): __lowercase : Optional[int] = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 __lowercase : Union[str, Any] = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class UpperCAmelCase_ ( snake_case ): def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> Union[str, Any]: super().__init__() self.register_modules( unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , movq=UpperCamelCase_ , ) __lowercase : List[str] = 2 ** (len(self.movq.config.block_out_channels ) - 1) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[Any]: if latents is None: __lowercase : Optional[int] = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=UpperCamelCase_ , dtype=UpperCamelCase_ ) else: if latents.shape != shape: raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {shape}""" ) __lowercase : Optional[Any] = latents.to(UpperCamelCase_ ) __lowercase : Optional[Any] = latents * scheduler.init_noise_sigma return latents def _lowerCamelCase ( self , UpperCamelCase_=0 ) -> Optional[int]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('''Please install accelerate via `pip install accelerate`''' ) __lowercase : Tuple = torch.device(F"""cuda:{gpu_id}""" ) __lowercase : Any = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(UpperCamelCase_ , UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_=0 ) -> Any: if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ): from accelerate import cpu_offload_with_hook else: raise ImportError('''`enable_model_cpu_offload` 
requires `accelerate v0.17.0` or higher.''' ) __lowercase : str = torch.device(F"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to('''cpu''' , silence_dtype_warnings=UpperCamelCase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) __lowercase : Optional[int] = None for cpu_offloaded_model in [self.unet, self.movq]: __lowercase ,__lowercase : Any = cpu_offload_with_hook(UpperCamelCase_ , UpperCamelCase_ , prev_module_hook=UpperCamelCase_ ) # We'll offload the last model manually. __lowercase : List[str] = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _lowerCamelCase ( self ) -> List[Any]: if not hasattr(self.unet , '''_hf_hook''' ): return self.device for module in self.unet.modules(): if ( hasattr(UpperCamelCase_ , '''_hf_hook''' ) and hasattr(module._hf_hook , '''execution_device''' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(UpperCamelCase_ ) def __call__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 5_12 , UpperCamelCase_ = 5_12 , UpperCamelCase_ = 1_00 , UpperCamelCase_ = 4.0 , UpperCamelCase_ = 1 , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = "pil" , UpperCamelCase_ = True , ) -> Union[str, Any]: __lowercase : Optional[int] = self._execution_device __lowercase : int = guidance_scale > 1.0 if isinstance(UpperCamelCase_ , UpperCamelCase_ ): __lowercase : Tuple = torch.cat(UpperCamelCase_ , dim=0 ) __lowercase : List[str] = image_embeds.shape[0] * num_images_per_prompt if isinstance(UpperCamelCase_ , UpperCamelCase_ ): __lowercase : Any = torch.cat(UpperCamelCase_ , dim=0 ) if do_classifier_free_guidance: __lowercase : Any = image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 ) __lowercase : List[str] = 
negative_image_embeds.repeat_interleave(UpperCamelCase_ , dim=0 ) __lowercase : List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase_ ) self.scheduler.set_timesteps(UpperCamelCase_ , device=UpperCamelCase_ ) __lowercase : Dict = self.scheduler.timesteps __lowercase : List[Any] = self.unet.config.in_channels __lowercase ,__lowercase : Any = downscale_height_and_width(UpperCamelCase_ , UpperCamelCase_ , self.movq_scale_factor ) # create initial latent __lowercase : int = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , self.scheduler , ) for i, t in enumerate(self.progress_bar(UpperCamelCase_ ) ): # expand the latents if we are doing classifier free guidance __lowercase : Any = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __lowercase : List[Any] = {'''image_embeds''': image_embeds} __lowercase : Dict = self.unet( sample=UpperCamelCase_ , timestep=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , added_cond_kwargs=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0] if do_classifier_free_guidance: __lowercase ,__lowercase : List[str] = noise_pred.split(latents.shape[1] , dim=1 ) __lowercase ,__lowercase : Optional[int] = noise_pred.chunk(2 ) __lowercase ,__lowercase : int = variance_pred.chunk(2 ) __lowercase : List[Any] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) __lowercase : Union[str, Any] = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , '''variance_type''' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): __lowercase ,__lowercase : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 __lowercase : int = self.scheduler.step( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ , 
)[0] # post-processing __lowercase : Dict = self.movq.decode(UpperCamelCase_ , force_not_quantize=UpperCamelCase_ )['''sample'''] if output_type not in ["pt", "np", "pil"]: raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: __lowercase : int = image * 0.5 + 0.5 __lowercase : int = image.clamp(0 , 1 ) __lowercase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": __lowercase : Optional[Any] = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ )
76
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : set[int] = set() # To detect a back edge, keep track of vertices currently in the recursion stack __lowercase : set[int] = set() return any( node not in visited and depth_first_search(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) for node in graph ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): visited.add(__UpperCamelCase ) rec_stk.add(__UpperCamelCase ) for node in graph[vertex]: if node not in visited: if depth_first_search(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): return True elif node in rec_stk: return True # The node needs to be removed from recursion stack before function ends rec_stk.remove(__UpperCamelCase ) return False if __name__ == "__main__": from doctest import testmod testmod()
76
1
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : Union[str, Any] = list(range(len(__UpperCamelCase ) ) ) __lowercase : List[Any] = [v / w for v, w in zip(__UpperCamelCase , __UpperCamelCase )] index.sort(key=lambda __UpperCamelCase : ratio[i] , reverse=__UpperCamelCase ) __lowercase : float = 0 __lowercase : list[float] = [0] * len(__UpperCamelCase ) for i in index: if weight[i] <= capacity: __lowercase : Tuple = 1 max_value += value[i] capacity -= weight[i] else: __lowercase : Any = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
76
"""simple docstring""" import logging import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEncoder, BertModel, BertPreTrainedModel, ) a_ = logging.getLogger(__name__) class UpperCAmelCase_ ( snake_case ): def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None ) -> Optional[Any]: __lowercase : Tuple = self.layer[current_layer](UpperCamelCase_ , UpperCamelCase_ , head_mask[current_layer] ) __lowercase : Any = layer_outputs[0] return hidden_states @add_start_docstrings( "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , snake_case , ) class UpperCAmelCase_ ( snake_case ): def __init__( self , UpperCamelCase_ ) -> int: super().__init__(UpperCamelCase_ ) __lowercase : Optional[Any] = BertEncoderWithPabee(UpperCamelCase_ ) self.init_weights() __lowercase : str = 0 __lowercase : Optional[Any] = 0 __lowercase : Optional[int] = 0 __lowercase : int = 0 def _lowerCamelCase ( self , UpperCamelCase_ ) -> Dict: __lowercase : Tuple = threshold def _lowerCamelCase ( self , UpperCamelCase_ ) -> Union[str, Any]: __lowercase : Optional[int] = patience def _lowerCamelCase ( self ) -> List[str]: __lowercase : Tuple = 0 __lowercase : Tuple = 0 def _lowerCamelCase ( self ) -> List[Any]: __lowercase : Optional[int] = self.inference_layers_num / self.inference_instances_num __lowercase : int = ( F"""*** Patience = {self.patience} Avg. 
Inference Layers = {avg_inf_layers:.2f} Speed Up =""" F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***""" ) print(UpperCamelCase_ ) @add_start_docstrings_to_model_forward(UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=False , ) -> Union[str, Any]: if input_ids is not None and inputs_embeds is not None: raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' ) elif input_ids is not None: __lowercase : Tuple = input_ids.size() elif inputs_embeds is not None: __lowercase : List[Any] = inputs_embeds.size()[:-1] else: raise ValueError('''You have to specify either input_ids or inputs_embeds''' ) __lowercase : int = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: __lowercase : Dict = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ ) if token_type_ids is None: __lowercase : int = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
__lowercase : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: __lowercase ,__lowercase ,__lowercase : Optional[int] = encoder_hidden_states.size() __lowercase : Any = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: __lowercase : List[str] = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ ) __lowercase : Tuple = self.invert_attention_mask(UpperCamelCase_ ) else: __lowercase : Tuple = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] __lowercase : Optional[int] = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers ) __lowercase : Optional[int] = self.embeddings( input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ ) __lowercase : Union[str, Any] = embedding_output if self.training: __lowercase : List[Any] = [] for i in range(self.config.num_hidden_layers ): __lowercase : str = self.encoder.adaptive_forward( UpperCamelCase_ , current_layer=UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ ) __lowercase : int = self.pooler(UpperCamelCase_ ) __lowercase : str = output_layers[i](output_dropout(UpperCamelCase_ ) ) res.append(UpperCamelCase_ ) elif self.patience == 0: # Use all layers for inference __lowercase : int = self.encoder( UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , ) __lowercase : 
Optional[Any] = self.pooler(encoder_outputs[0] ) __lowercase : int = [output_layers[self.config.num_hidden_layers - 1](UpperCamelCase_ )] else: __lowercase : Optional[int] = 0 __lowercase : Union[str, Any] = None __lowercase : int = 0 for i in range(self.config.num_hidden_layers ): calculated_layer_num += 1 __lowercase : Tuple = self.encoder.adaptive_forward( UpperCamelCase_ , current_layer=UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ ) __lowercase : Dict = self.pooler(UpperCamelCase_ ) __lowercase : Optional[int] = output_layers[i](UpperCamelCase_ ) if regression: __lowercase : Any = logits.detach() if patient_result is not None: __lowercase : List[str] = patient_result.detach() if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold: patient_counter += 1 else: __lowercase : int = 0 else: __lowercase : List[str] = logits.detach().argmax(dim=1 ) if patient_result is not None: __lowercase : Optional[Any] = patient_result.detach().argmax(dim=1 ) if (patient_result is not None) and torch.all(labels.eq(UpperCamelCase_ ) ): patient_counter += 1 else: __lowercase : Tuple = 0 __lowercase : Union[str, Any] = logits if patient_counter == self.patience: break __lowercase : Optional[int] = [patient_result] self.inference_layers_num += calculated_layer_num self.inference_instances_num += 1 return res @add_start_docstrings( "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. 
" , snake_case , ) class UpperCAmelCase_ ( snake_case ): def __init__( self , UpperCamelCase_ ) -> Optional[Any]: super().__init__(UpperCamelCase_ ) __lowercase : List[Any] = config.num_labels __lowercase : int = BertModelWithPabee(UpperCamelCase_ ) __lowercase : int = nn.Dropout(config.hidden_dropout_prob ) __lowercase : Union[str, Any] = nn.ModuleList( [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] ) self.init_weights() @add_start_docstrings_to_model_forward(UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , ) -> int: __lowercase : Union[str, Any] = self.bert( input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , ) __lowercase : List[str] = (logits[-1],) if labels is not None: __lowercase : Any = None __lowercase : Optional[int] = 0 for ix, logits_item in enumerate(UpperCamelCase_ ): if self.num_labels == 1: # We are doing regression __lowercase : Any = MSELoss() __lowercase : Any = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) ) else: __lowercase : str = CrossEntropyLoss() __lowercase : Dict = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) ) if total_loss is None: __lowercase : List[str] = loss else: total_loss += loss * (ix + 1) total_weights += ix + 1 __lowercase : Union[str, Any] = (total_loss / total_weights,) + outputs return outputs
76
1
"""simple docstring""" import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets a_ = datasets.logging.get_logger(__name__) a_ = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n' a_ = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n' a_ = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. 
Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n' a_ = { 'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip', 'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip', 'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip', 'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip', 'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip', 'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip', 'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip', 'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip', 'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip', 'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip', } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): def _lowerCamelCase ( self ) -> List[str]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''predictions''': datasets.Value('''string''' , id='''sequence''' ), '''references''': datasets.Value('''string''' , id='''sequence''' ), } ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> Tuple: # check that 
config name specifies a valid BLEURT model if self.config_name == "default": logger.warning( '''Using default BLEURT-Base checkpoint for sequence maximum length 128. ''' '''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' ) __lowercase : Union[str, Any] = '''bleurt-base-128''' if self.config_name.lower() in CHECKPOINT_URLS: __lowercase : List[str] = self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: __lowercase : int = self.config_name.upper() else: raise KeyError( F"""{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}""" ) # download the model checkpoint specified by self.config_name and set up the scorer __lowercase : str = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] ) __lowercase : Optional[int] = score.BleurtScorer(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> int: __lowercase : List[Any] = self.scorer.score(references=UpperCamelCase_ , candidates=UpperCamelCase_ ) return {"scores": scores}
76
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() a_ = logging.get_logger(__name__) a_ = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): for attribute in key.split('''.''' ): __lowercase : str = getattr(__UpperCamelCase , __UpperCamelCase ) if weight_type is not None: __lowercase : int = getattr(__UpperCamelCase , __UpperCamelCase ).shape else: __lowercase : int = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowercase : List[str] = value elif weight_type == "weight_g": __lowercase : Optional[Any] = value elif weight_type == "weight_v": __lowercase : Tuple = value elif weight_type == "bias": __lowercase : Dict = value else: __lowercase : Union[str, Any] = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : Tuple = [] __lowercase : Union[str, Any] = fairseq_model.state_dict() __lowercase : Optional[Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): __lowercase : Union[str, Any] = False if "conv_layers" in name: load_conv_layer( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , hf_model.config.feat_extract_norm == '''group''' , ) __lowercase : List[str] = True else: for key, mapped_key in MAPPING.items(): __lowercase : List[str] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned): __lowercase : int = True if "*" in mapped_key: __lowercase : Union[str, Any] = name.split(__UpperCamelCase )[0].split('''.''' )[-2] __lowercase : Tuple = mapped_key.replace('''*''' , __UpperCamelCase ) if "weight_g" in name: __lowercase : Tuple = '''weight_g''' elif "weight_v" in name: __lowercase : Optional[int] = '''weight_v''' elif "weight" in name: __lowercase : str = '''weight''' elif "bias" in name: __lowercase : Optional[int] = '''bias''' else: __lowercase : List[str] = None set_recursively(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) continue if not is_used: unused_weights.append(__UpperCamelCase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : List[Any] = full_name.split('''conv_layers.''' )[-1] __lowercase : str = name.split('''.''' ) __lowercase : Dict = int(items[0] ) __lowercase : Any = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == 
feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowercase : List[str] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowercase : Tuple = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) __lowercase : Union[str, Any] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowercase : Tuple = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__UpperCamelCase ) @torch.no_grad() def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=True ): if config_path is not None: __lowercase : Dict = HubertConfig.from_pretrained(__UpperCamelCase ) else: __lowercase : str = HubertConfig() if is_finetuned: if dict_path: __lowercase : Tuple = Dictionary.load(__UpperCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __lowercase : int = target_dict.pad_index __lowercase : Union[str, Any] = target_dict.bos_index __lowercase : int = target_dict.eos_index __lowercase : int = len(target_dict.symbols ) __lowercase : Dict = os.path.join(__UpperCamelCase , '''vocab.json''' ) if not os.path.isdir(__UpperCamelCase ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__UpperCamelCase ) ) return os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , __UpperCamelCase ) __lowercase : str = WavaVecaCTCTokenizer( __UpperCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__UpperCamelCase , ) __lowercase : str = True if config.feat_extract_norm == '''layer''' else False __lowercase : Any = 
WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__UpperCamelCase , return_attention_mask=__UpperCamelCase , ) __lowercase : Union[str, Any] = WavaVecaProcessor(feature_extractor=__UpperCamelCase , tokenizer=__UpperCamelCase ) processor.save_pretrained(__UpperCamelCase ) __lowercase : Optional[Any] = HubertForCTC(__UpperCamelCase ) else: __lowercase : Union[str, Any] = HubertModel(__UpperCamelCase ) if is_finetuned: __lowercase ,__lowercase ,__lowercase : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __lowercase ,__lowercase ,__lowercase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) __lowercase : Union[str, Any] = model[0].eval() recursively_load_weights(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) hf_wavavec.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) a_ = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
76
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) a_ = { 'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig'] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = ['ConvNextFeatureExtractor'] a_ = ['ConvNextImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvNextForImageClassification', 'ConvNextModel', 'ConvNextPreTrainedModel', 'ConvNextBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'TFConvNextForImageClassification', 'TFConvNextModel', 'TFConvNextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
76
"""simple docstring""" a_ = { 'Pillow': 'Pillow<10.0.0', 'accelerate': 'accelerate>=0.20.3', 'av': 'av==9.2.0', 'beautifulsoup4': 'beautifulsoup4', 'black': 'black~=23.1', 'codecarbon': 'codecarbon==1.2.0', 'cookiecutter': 'cookiecutter==1.7.3', 'dataclasses': 'dataclasses', 'datasets': 'datasets!=2.5.0', 'decord': 'decord==0.6.0', 'deepspeed': 'deepspeed>=0.9.3', 'diffusers': 'diffusers', 'dill': 'dill<0.3.5', 'evaluate': 'evaluate>=0.2.0', 'fairscale': 'fairscale>0.3', 'faiss-cpu': 'faiss-cpu', 'fastapi': 'fastapi', 'filelock': 'filelock', 'flax': 'flax>=0.4.1,<=0.7.0', 'ftfy': 'ftfy', 'fugashi': 'fugashi>=1.0', 'GitPython': 'GitPython<3.1.19', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0', 'importlib_metadata': 'importlib_metadata', 'ipadic': 'ipadic>=1.0.0,<2.0', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13', 'jaxlib': 'jaxlib>=0.1.65,<=0.4.13', 'jieba': 'jieba', 'kenlm': 'kenlm', 'keras-nlp': 'keras-nlp>=0.3.1', 'librosa': 'librosa', 'nltk': 'nltk', 'natten': 'natten>=0.14.6', 'numpy': 'numpy>=1.17', 'onnxconverter-common': 'onnxconverter-common', 'onnxruntime-tools': 'onnxruntime-tools>=1.4.2', 'onnxruntime': 'onnxruntime>=1.4.0', 'opencv-python': 'opencv-python', 'optuna': 'optuna', 'optax': 'optax>=0.0.8,<=0.1.4', 'packaging': 'packaging>=20.0', 'parameterized': 'parameterized', 'phonemizer': 'phonemizer', 'protobuf': 'protobuf', 'psutil': 'psutil', 'pyyaml': 'pyyaml>=5.1', 'pydantic': 'pydantic<2', 'pytest': 'pytest>=7.2.0', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'python': 'python>=3.8.0', 'ray[tune]': 'ray[tune]', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'rhoknp': 'rhoknp>=1.1.0,<1.3.1', 'rjieba': 'rjieba', 'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1', 'ruff': 'ruff>=0.0.241,<=0.0.259', 'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0', 'sacremoses': 'sacremoses', 'safetensors': 'safetensors>=0.3.1', 'sagemaker': 'sagemaker>=2.31.0', 'scikit-learn': 
'scikit-learn', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'sigopt': 'sigopt', 'starlette': 'starlette', 'sudachipy': 'sudachipy>=0.6.6', 'sudachidict_core': 'sudachidict_core>=20220729', 'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14', 'tensorflow': 'tensorflow>=2.6,<2.14', 'tensorflow-text': 'tensorflow-text<2.14', 'tf2onnx': 'tf2onnx', 'timeout-decorator': 'timeout-decorator', 'timm': 'timm', 'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14', 'torch': 'torch>=1.9,!=1.12.0', 'torchaudio': 'torchaudio', 'torchvision': 'torchvision', 'pyctcdecode': 'pyctcdecode>=0.4.0', 'tqdm': 'tqdm>=4.27', 'unidic': 'unidic>=1.0.2', 'unidic_lite': 'unidic_lite>=1.0.7', 'urllib3': 'urllib3<2.0.0', 'uvicorn': 'uvicorn', }
76
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available a_ = { 'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'], 'tokenization_canine': ['CanineTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a_ = [ 'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST', 'CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineLayer', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys a_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor from .base import PipelineTool class UpperCAmelCase_ ( snake_case ): UpperCamelCase ="openai/whisper-base" UpperCamelCase =( "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the " "transcribed text." ) UpperCamelCase ="transcriber" UpperCamelCase =WhisperProcessor UpperCamelCase =WhisperForConditionalGeneration UpperCamelCase =["audio"] UpperCamelCase =["text"] def _lowerCamelCase ( self , UpperCamelCase_ ) -> Union[str, Any]: return self.pre_processor(UpperCamelCase_ , return_tensors='''pt''' ).input_features def _lowerCamelCase ( self , UpperCamelCase_ ) -> Optional[Any]: return self.model.generate(inputs=UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[str]: return self.pre_processor.batch_decode(UpperCamelCase_ , skip_special_tokens=UpperCamelCase_ )[0]
76
1
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): return int((input_a, input_a).count(0 ) == 0 ) def __UpperCAmelCase ( ): assert and_gate(0 , 0 ) == 0 assert and_gate(0 , 1 ) == 0 assert and_gate(1 , 0 ) == 0 assert and_gate(1 , 1 ) == 1 if __name__ == "__main__": test_and_gate() print(and_gate(1, 0)) print(and_gate(0, 0)) print(and_gate(0, 1)) print(and_gate(1, 1))
76
"""simple docstring""" import gc import threading import time import psutil import torch class UpperCAmelCase_ : def __init__( self ) -> str: __lowercase : List[Any] = psutil.Process() __lowercase : Any = False def _lowerCamelCase ( self ) -> Union[str, Any]: __lowercase : Optional[Any] = -1 while True: __lowercase : List[str] = max(self.process.memory_info().rss , self.cpu_memory_peak ) # can't sleep or will not catch the peak right (this comment is here on purpose) if not self.peak_monitoring: break def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : List[Any] = True __lowercase : List[Any] = threading.Thread(target=self.peak_monitor ) __lowercase : Optional[int] = True self.thread.start() def _lowerCamelCase ( self ) -> Optional[Any]: __lowercase : Union[str, Any] = False self.thread.join() return self.cpu_memory_peak a_ = PeakCPUMemory() def __UpperCAmelCase ( ): # Time __lowercase : Union[str, Any] = {'''time''': time.time()} gc.collect() torch.cuda.empty_cache() # CPU mem __lowercase : List[Any] = psutil.Process().memory_info().rss cpu_peak_tracker.start() # GPU mem for i in range(torch.cuda.device_count() ): __lowercase : List[str] = torch.cuda.memory_allocated(__UpperCamelCase ) torch.cuda.reset_peak_memory_stats() return measures def __UpperCAmelCase ( __UpperCamelCase ): # Time __lowercase : List[Any] = {'''time''': time.time() - start_measures['''time''']} gc.collect() torch.cuda.empty_cache() # CPU mem __lowercase : Union[str, Any] = (psutil.Process().memory_info().rss - start_measures['''cpu''']) / 2**20 __lowercase : Dict = (cpu_peak_tracker.stop() - start_measures['''cpu''']) / 2**20 # GPU mem for i in range(torch.cuda.device_count() ): __lowercase : str = (torch.cuda.memory_allocated(__UpperCamelCase ) - start_measures[str(__UpperCamelCase )]) / 2**20 __lowercase : Optional[int] = (torch.cuda.max_memory_allocated(__UpperCamelCase ) - start_measures[str(__UpperCamelCase )]) / 2**20 return measures def __UpperCAmelCase ( __UpperCamelCase , 
__UpperCamelCase ): print(f"""{description}:""" ) print(f"""- Time: {measures["time"]:.2f}s""" ) for i in range(torch.cuda.device_count() ): print(f"""- GPU {i} allocated: {measures[str(__UpperCamelCase )]:.2f}MiB""" ) __lowercase : Dict = measures[f"""{i}-peak"""] print(f"""- GPU {i} peak: {peak:.2f}MiB""" ) print(f"""- CPU RAM allocated: {measures["cpu"]:.2f}MiB""" ) print(f"""- CPU RAM peak: {measures["cpu-peak"]:.2f}MiB""" )
76
1
"""simple docstring""" import numpy as np import datasets a_ = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n' a_ = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n' a_ = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): def _lowerCamelCase ( self ) -> List[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ), } ) , ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> Tuple: # convert to numpy arrays __lowercase : Dict = np.array(UpperCamelCase_ ) __lowercase : str = np.array(UpperCamelCase_ ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError('''Expected `X` to be 
a 2D vector''' ) if len(reference_distribution.shape ) != 2: raise ValueError('''Expected `reference_distribution` to be a 2D vector''' ) if reference_distribution.shape[0] < 2: raise ValueError( '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' ) # Get mahalanobis distance for each prediction __lowercase : Tuple = X - np.mean(UpperCamelCase_ ) __lowercase : List[Any] = np.cov(reference_distribution.T ) try: __lowercase : Tuple = np.linalg.inv(UpperCamelCase_ ) except np.linalg.LinAlgError: __lowercase : str = np.linalg.pinv(UpperCamelCase_ ) __lowercase : Any = np.dot(UpperCamelCase_ , UpperCamelCase_ ) __lowercase : Optional[Any] = np.dot(UpperCamelCase_ , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
76
"""simple docstring""" import numpy as np import datasets a_ = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n' a_ = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n' a_ = '\nArgs:\n X: List of datapoints to be compared with the `reference_distribution`.\n reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n >>> print(results)\n {\'mahalanobis\': array([0.5])}\n' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): def _lowerCamelCase ( self ) -> List[Any]: return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { '''X''': datasets.Sequence(datasets.Value('''float''' , id='''sequence''' ) , id='''X''' ), } ) , ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> Tuple: # convert to numpy arrays __lowercase : Dict = np.array(UpperCamelCase_ ) __lowercase : str = np.array(UpperCamelCase_ ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError('''Expected `X` to be 
a 2D vector''' ) if len(reference_distribution.shape ) != 2: raise ValueError('''Expected `reference_distribution` to be a 2D vector''' ) if reference_distribution.shape[0] < 2: raise ValueError( '''Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension''' ) # Get mahalanobis distance for each prediction __lowercase : Tuple = X - np.mean(UpperCamelCase_ ) __lowercase : List[Any] = np.cov(reference_distribution.T ) try: __lowercase : Tuple = np.linalg.inv(UpperCamelCase_ ) except np.linalg.LinAlgError: __lowercase : str = np.linalg.pinv(UpperCamelCase_ ) __lowercase : Any = np.dot(UpperCamelCase_ , UpperCamelCase_ ) __lowercase : Optional[Any] = np.dot(UpperCamelCase_ , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
76
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : int = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: __lowercase : str = 1_92 __lowercase : Tuple = 7_68 __lowercase : str = 12 __lowercase : str = 3 __lowercase : Optional[Any] = [8_00, 13_33] __lowercase : Optional[Any] = False elif yolos_name == "yolos_s_dWr": __lowercase : Any = 3_30 __lowercase : List[str] = 14 __lowercase : Tuple = 6 __lowercase : Any = 13_20 elif "yolos_s" in yolos_name: __lowercase : Tuple = 3_84 __lowercase : Any = 15_36 __lowercase : List[str] = 12 __lowercase : Dict = 6 elif "yolos_b" in yolos_name: __lowercase : int = [8_00, 13_44] __lowercase : Any = 91 __lowercase : str = '''huggingface/label-files''' __lowercase : str = '''coco-detection-id2label.json''' __lowercase : Optional[Any] = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) ) __lowercase : List[Any] = {int(__UpperCamelCase ): v for k, v in idalabel.items()} __lowercase : str = idalabel __lowercase : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ): for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __lowercase : str = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) __lowercase : int = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict __lowercase : Tuple = in_proj_weight[: config.hidden_size, :] __lowercase : Any = in_proj_bias[: 
config.hidden_size] __lowercase : Tuple = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __lowercase : Optional[int] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __lowercase : Any = in_proj_weight[-config.hidden_size :, :] __lowercase : List[str] = in_proj_bias[-config.hidden_size :] def __UpperCAmelCase ( __UpperCamelCase ): if "backbone" in name: __lowercase : List[str] = name.replace('''backbone''' , '''vit''' ) if "cls_token" in name: __lowercase : Tuple = name.replace('''cls_token''' , '''embeddings.cls_token''' ) if "det_token" in name: __lowercase : Optional[Any] = name.replace('''det_token''' , '''embeddings.detection_tokens''' ) if "mid_pos_embed" in name: __lowercase : List[str] = name.replace('''mid_pos_embed''' , '''encoder.mid_position_embeddings''' ) if "pos_embed" in name: __lowercase : Dict = name.replace('''pos_embed''' , '''embeddings.position_embeddings''' ) if "patch_embed.proj" in name: __lowercase : Optional[int] = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' ) if "blocks" in name: __lowercase : Tuple = name.replace('''blocks''' , '''encoder.layer''' ) if "attn.proj" in name: __lowercase : Optional[Any] = name.replace('''attn.proj''' , '''attention.output.dense''' ) if "attn" in name: __lowercase : Optional[int] = name.replace('''attn''' , '''attention.self''' ) if "norm1" in name: __lowercase : Tuple = name.replace('''norm1''' , '''layernorm_before''' ) if "norm2" in name: __lowercase : Dict = name.replace('''norm2''' , '''layernorm_after''' ) if "mlp.fc1" in name: __lowercase : Optional[Any] = name.replace('''mlp.fc1''' , '''intermediate.dense''' ) if "mlp.fc2" in name: __lowercase : int = name.replace('''mlp.fc2''' , '''output.dense''' ) if "class_embed" in name: __lowercase : Union[str, Any] = name.replace('''class_embed''' , '''class_labels_classifier''' ) if "bbox_embed" in name: __lowercase : Dict = name.replace('''bbox_embed''' , '''bbox_predictor''' ) if 
"vit.norm" in name: __lowercase : Optional[int] = name.replace('''vit.norm''' , '''vit.layernorm''' ) return name def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): for key in orig_state_dict.copy().keys(): __lowercase : Tuple = orig_state_dict.pop(__UpperCamelCase ) if "qkv" in key: __lowercase : List[Any] = key.split('''.''' ) __lowercase : Union[str, Any] = int(key_split[2] ) __lowercase : Union[str, Any] = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: __lowercase : str = val[:dim, :] __lowercase : int = val[ dim : dim * 2, : ] __lowercase : Union[str, Any] = val[-dim:, :] else: __lowercase : Any = val[:dim] __lowercase : Optional[Any] = val[dim : dim * 2] __lowercase : int = val[-dim:] else: __lowercase : Tuple = val return orig_state_dict def __UpperCAmelCase ( ): __lowercase : Optional[int] = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __lowercase : str = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = False ): __lowercase : List[Any] = get_yolos_config(__UpperCamelCase ) # load original state_dict __lowercase : Optional[int] = torch.load(__UpperCamelCase , map_location='''cpu''' )['''model'''] # load 🤗 model __lowercase : Any = YolosForObjectDetection(__UpperCamelCase ) model.eval() __lowercase : int = convert_state_dict(__UpperCamelCase , __UpperCamelCase ) model.load_state_dict(__UpperCamelCase ) # Check outputs on an image, prepared by YolosImageProcessor __lowercase : List[str] = 8_00 if yolos_name != '''yolos_ti''' else 5_12 __lowercase : Union[str, Any] = YolosImageProcessor(format='''coco_detection''' , size=__UpperCamelCase ) __lowercase : Dict = image_processor(images=prepare_img() , return_tensors='''pt''' ) __lowercase : str = model(**__UpperCamelCase ) __lowercase ,__lowercase : Dict = outputs.logits, outputs.pred_boxes 
__lowercase ,__lowercase : Dict = None, None if yolos_name == "yolos_ti": __lowercase : Optional[int] = torch.tensor( [[-39.5_022, -11.9_820, -17.6_888], [-29.9_574, -9.9_769, -17.7_691], [-42.3_281, -20.7_200, -30.6_294]] ) __lowercase : Dict = torch.tensor( [[0.4_021, 0.0_836, 0.7_979], [0.0_184, 0.2_609, 0.0_364], [0.1_781, 0.2_004, 0.2_095]] ) elif yolos_name == "yolos_s_200_pre": __lowercase : Union[str, Any] = torch.tensor( [[-24.0_248, -10.3_024, -14.8_290], [-42.0_392, -16.8_200, -27.4_334], [-27.2_743, -11.8_154, -18.7_148]] ) __lowercase : Tuple = torch.tensor( [[0.2_559, 0.5_455, 0.4_706], [0.2_989, 0.7_279, 0.1_875], [0.7_732, 0.4_017, 0.4_462]] ) elif yolos_name == "yolos_s_300_pre": __lowercase : Union[str, Any] = torch.tensor( [[-36.2_220, -14.4_385, -23.5_457], [-35.6_970, -14.7_583, -21.3_935], [-31.5_939, -13.6_042, -16.8_049]] ) __lowercase : List[str] = torch.tensor( [[0.7_614, 0.2_316, 0.4_728], [0.7_168, 0.4_495, 0.3_855], [0.4_996, 0.1_466, 0.9_996]] ) elif yolos_name == "yolos_s_dWr": __lowercase : Dict = torch.tensor( [[-42.8_668, -24.1_049, -41.1_690], [-34.7_456, -14.1_274, -24.9_194], [-33.7_898, -12.1_946, -25.6_495]] ) __lowercase : str = torch.tensor( [[0.5_587, 0.2_773, 0.0_605], [0.5_004, 0.3_014, 0.9_994], [0.4_999, 0.1_548, 0.9_994]] ) elif yolos_name == "yolos_base": __lowercase : int = torch.tensor( [[-40.6_064, -24.3_084, -32.6_447], [-55.1_990, -30.7_719, -35.5_877], [-51.4_311, -33.3_507, -35.6_462]] ) __lowercase : Optional[Any] = torch.tensor( [[0.5_555, 0.2_794, 0.0_655], [0.9_049, 0.2_664, 0.1_894], [0.9_183, 0.1_984, 0.1_635]] ) else: raise ValueError(f"""Unknown yolos_name: {yolos_name}""" ) assert torch.allclose(logits[0, :3, :3] , __UpperCamelCase , atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3] , __UpperCamelCase , atol=1e-4 ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f"""Saving model {yolos_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__UpperCamelCase ) 
print(f"""Saving image processor to {pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__UpperCamelCase ) if push_to_hub: __lowercase : Any = { '''yolos_ti''': '''yolos-tiny''', '''yolos_s_200_pre''': '''yolos-small''', '''yolos_s_300_pre''': '''yolos-small-300''', '''yolos_s_dWr''': '''yolos-small-dwr''', '''yolos_base''': '''yolos-base''', } print('''Pushing to the hub...''' ) __lowercase : Union[str, Any] = model_mapping[yolos_name] image_processor.push_to_hub(__UpperCamelCase , organization='''hustvl''' ) model.push_to_hub(__UpperCamelCase , organization='''hustvl''' ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--yolos_name', default='yolos_s_200_pre', type=str, help=( 'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',' ' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.' ), ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) a_ = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
76
"""simple docstring""" a_ = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/' def __UpperCAmelCase ( __UpperCamelCase ): # Make sure the supplied data is a bytes-like object if not isinstance(__UpperCamelCase , __UpperCamelCase ): __lowercase : str = f"""a bytes-like object is required, not '{data.__class__.__name__}'""" raise TypeError(__UpperCamelCase ) __lowercase : Any = ''''''.join(bin(__UpperCamelCase )[2:].zfill(8 ) for byte in data ) __lowercase : List[str] = len(__UpperCamelCase ) % 6 != 0 if padding_needed: # The padding that will be added later __lowercase : int = B'''=''' * ((6 - len(__UpperCamelCase ) % 6) // 2) # Append binary_stream with arbitrary binary digits (0's by default) to make its # length a multiple of 6. binary_stream += "0" * (6 - len(__UpperCamelCase ) % 6) else: __lowercase : Any = B'''''' # Encode every 6 binary digits to their corresponding Base64 character return ( "".join( B64_CHARSET[int(binary_stream[index : index + 6] , 2 )] for index in range(0 , len(__UpperCamelCase ) , 6 ) ).encode() + padding ) def __UpperCAmelCase ( __UpperCamelCase ): # Make sure encoded_data is either a string or a bytes-like object if not isinstance(__UpperCamelCase , __UpperCamelCase ) and not isinstance(__UpperCamelCase , __UpperCamelCase ): __lowercase : List[str] = ( '''argument should be a bytes-like object or ASCII string, ''' f"""not '{encoded_data.__class__.__name__}'""" ) raise TypeError(__UpperCamelCase ) # In case encoded_data is a bytes-like object, make sure it contains only # ASCII characters so we convert it to a string object if isinstance(__UpperCamelCase , __UpperCamelCase ): try: __lowercase : List[str] = encoded_data.decode('''utf-8''' ) except UnicodeDecodeError: raise ValueError('''base64 encoded data should only contain ASCII characters''' ) __lowercase : Dict = encoded_data.count('''=''' ) # Check if the encoded string contains non base64 characters if padding: assert all( char in B64_CHARSET for char in 
encoded_data[:-padding] ), "Invalid base64 character(s) found." else: assert all( char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found." # Check the padding assert len(__UpperCamelCase ) % 4 == 0 and padding < 3, "Incorrect padding" if padding: # Remove padding if there is one __lowercase : Tuple = encoded_data[:-padding] __lowercase : str = ''''''.join( bin(B64_CHARSET.index(__UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2] else: __lowercase : Any = ''''''.join( bin(B64_CHARSET.index(__UpperCamelCase ) )[2:].zfill(6 ) for char in encoded_data ) __lowercase : int = [ int(binary_stream[index : index + 8] , 2 ) for index in range(0 , len(__UpperCamelCase ) , 8 ) ] return bytes(__UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod()
76
1
"""simple docstring""" import numpy as np def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): return np.where(vector > 0 , __UpperCamelCase , (alpha * (np.exp(__UpperCamelCase ) - 1)) ) if __name__ == "__main__": import doctest doctest.testmod()
76
"""simple docstring""" import json import os from typing import Optional, Tuple import regex as re from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', } a_ = { 'vocab_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'}, 'merges_file': {'ctrl': 'https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'}, } a_ = { 'ctrl': 2_5_6, } a_ = { 'Pregnancy': 1_6_8_6_2_9, 'Christianity': 7_6_7_5, 'Explain': 1_0_6_4_2_3, 'Fitness': 6_3_4_4_0, 'Saving': 6_3_1_6_3, 'Ask': 2_7_1_7_1, 'Ass': 9_5_9_8_5, 'Joke': 1_6_3_5_0_9, 'Questions': 4_5_6_2_2, 'Thoughts': 4_9_6_0_5, 'Retail': 5_2_3_4_2, 'Feminism': 1_6_4_3_3_8, 'Writing': 1_1_9_9_2, 'Atheism': 1_9_2_2_6_3, 'Netflix': 4_8_6_1_6, 'Computing': 3_9_6_3_9, 'Opinion': 4_3_2_1_3, 'Alone': 4_4_9_6_7, 'Funny': 5_8_9_1_7, 'Gaming': 4_0_3_5_8, 'Human': 4_0_8_8, 'India': 1_3_3_1, 'Joker': 7_7_1_3_8, 'Diet': 3_6_2_0_6, 'Legal': 1_1_8_5_9, 'Norman': 4_9_3_9, 'Tip': 7_2_6_8_9, 'Weight': 5_2_3_4_3, 'Movies': 4_6_2_7_3, 'Running': 2_3_4_2_5, 'Science': 2_0_9_0, 'Horror': 3_7_7_9_3, 'Confession': 6_0_5_7_2, 'Finance': 1_2_2_5_0, 'Politics': 1_6_3_6_0, 'Scary': 1_9_1_9_8_5, 'Support': 1_2_6_5_4, 'Technologies': 3_2_5_1_6, 'Teenage': 6_6_1_6_0, 'Event': 3_2_7_6_9, 'Learned': 6_7_4_6_0, 'Notion': 1_8_2_7_7_0, 'Wikipedia': 3_7_5_8_3, 'Books': 6_6_6_5, 'Extract': 7_6_0_5_0, 'Confessions': 1_0_2_7_0_1, 'Conspiracy': 7_5_9_3_2, 'Links': 6_3_6_7_4, 'Narcissus': 1_5_0_4_2_5, 'Relationship': 5_4_7_6_6, 'Relationships': 1_3_4_7_9_6, 'Reviews': 4_1_6_7_1, 'News': 4_2_5_6, 'Translation': 2_6_8_2_0, 'multilingual': 1_2_8_4_0_6, } def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Any = set() __lowercase : Tuple = word[0] for char in word[1:]: pairs.add((prev_char, char) ) __lowercase : Any = char __lowercase : List[Any] = set(__UpperCamelCase ) return pairs class 
UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =CONTROL_CODES def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_="<unk>" , **UpperCamelCase_ ) -> int: super().__init__(unk_token=UpperCamelCase_ , **UpperCamelCase_ ) with open(UpperCamelCase_ , encoding='''utf-8''' ) as vocab_handle: __lowercase : List[Any] = json.load(UpperCamelCase_ ) __lowercase : Any = {v: k for k, v in self.encoder.items()} with open(UpperCamelCase_ , encoding='''utf-8''' ) as merges_handle: __lowercase : Optional[Any] = merges_handle.read().split('''\n''' )[1:-1] __lowercase : Optional[Any] = [tuple(merge.split() ) for merge in merges] __lowercase : Optional[int] = dict(zip(UpperCamelCase_ , range(len(UpperCamelCase_ ) ) ) ) __lowercase : Optional[Any] = {} @property def _lowerCamelCase ( self ) -> Union[str, Any]: return len(self.encoder ) def _lowerCamelCase ( self ) -> Tuple: return dict(self.encoder , **self.added_tokens_encoder ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> str: if token in self.cache: return self.cache[token] __lowercase : str = tuple(UpperCamelCase_ ) __lowercase : str = tuple(list(word[:-1] ) + [word[-1] + '''</w>'''] ) __lowercase : Optional[Any] = get_pairs(UpperCamelCase_ ) if not pairs: return token while True: __lowercase : Dict = min(UpperCamelCase_ , key=lambda UpperCamelCase_ : self.bpe_ranks.get(UpperCamelCase_ , float('''inf''' ) ) ) if bigram not in self.bpe_ranks: break __lowercase ,__lowercase : Tuple = bigram __lowercase : int = [] __lowercase : Union[str, Any] = 0 while i < len(UpperCamelCase_ ): try: __lowercase : Optional[int] = word.index(UpperCamelCase_ , UpperCamelCase_ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) __lowercase : Tuple = j if word[i] == first and i < len(UpperCamelCase_ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i 
+= 2 else: new_word.append(word[i] ) i += 1 __lowercase : List[str] = tuple(UpperCamelCase_ ) __lowercase : str = new_word if len(UpperCamelCase_ ) == 1: break else: __lowercase : List[str] = get_pairs(UpperCamelCase_ ) __lowercase : Optional[Any] = '''@@ '''.join(UpperCamelCase_ ) __lowercase : Dict = word[:-4] __lowercase : str = word return word def _lowerCamelCase ( self , UpperCamelCase_ ) -> str: __lowercase : List[Any] = [] __lowercase : int = re.findall(R'''\S+\n?''' , UpperCamelCase_ ) for token in words: split_tokens.extend(list(self.bpe(UpperCamelCase_ ).split(''' ''' ) ) ) return split_tokens def _lowerCamelCase ( self , UpperCamelCase_ ) -> Optional[Any]: return self.encoder.get(UpperCamelCase_ , self.encoder.get(self.unk_token ) ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> int: return self.decoder.get(UpperCamelCase_ , self.unk_token ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> Optional[int]: __lowercase : Tuple = ''' '''.join(UpperCamelCase_ ).replace('''@@ ''' , '''''' ).strip() return out_string def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> Tuple[str]: if not os.path.isdir(UpperCamelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowercase : Optional[Any] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) __lowercase : Optional[int] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''merges_file'''] ) with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as f: f.write(json.dumps(self.encoder , indent=2 , sort_keys=UpperCamelCase_ , ensure_ascii=UpperCamelCase_ ) + '''\n''' ) __lowercase : List[str] = 0 with open(UpperCamelCase_ , '''w''' , encoding='''utf-8''' ) as writer: writer.write('''#version: 0.2\n''' ) for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda UpperCamelCase_ : kv[1] ): 
if index != token_index: logger.warning( F"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.""" ''' Please check that the tokenizer is not corrupted!''' ) __lowercase : Union[str, Any] = token_index writer.write(''' '''.join(UpperCamelCase_ ) + '''\n''' ) index += 1 return vocab_file, merge_file # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True): # filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens)) # tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens) # tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far) # return ''.join(tokens_generated_so_far)
76
1
"""simple docstring""" import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging a_ = logging.get_logger(__name__) a_ = '▁' a_ = {'vocab_file': 'prophetnet.tokenizer'} a_ = { 'vocab_file': { 'microsoft/xprophetnet-large-wiki100-cased': ( 'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer' ), } } a_ = { 'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False}, } a_ = { 'microsoft/xprophetnet-large-wiki100-cased': 5_1_2, } def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : str = collections.OrderedDict() with open(__UpperCamelCase , '''r''' , encoding='''utf-8''' ) as reader: __lowercase : Any = reader.readlines() for index, token in enumerate(__UpperCamelCase ): __lowercase : Any = token.rstrip('''\n''' ) __lowercase : Union[str, Any] = index return vocab class UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =["input_ids", "attention_mask"] def __init__( self , UpperCamelCase_ , UpperCamelCase_="[SEP]" , UpperCamelCase_="[SEP]" , UpperCamelCase_="[SEP]" , UpperCamelCase_="[UNK]" , UpperCamelCase_="[PAD]" , UpperCamelCase_="[CLS]" , UpperCamelCase_="[MASK]" , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> None: __lowercase : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) try: import sentencepiece as spm except ImportError: logger.warning( '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece''' ''' pip 
install sentencepiece''' ) raise __lowercase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCamelCase_ ) ) __lowercase : Union[str, Any] = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab __lowercase : str = {'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4} for i in range(10 ): __lowercase : Optional[int] = F"""[unused{i}]""" __lowercase : Any = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab __lowercase : int = 12 __lowercase : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(UpperCamelCase_ ) def __getstate__( self ) -> Tuple: __lowercase : Any = self.__dict__.copy() __lowercase : List[str] = None return state def __setstate__( self , UpperCamelCase_ ) -> Optional[int]: __lowercase : int = d try: import sentencepiece as spm except ImportError: logger.warning( '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece''' ''' pip install sentencepiece''' ) raise # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __lowercase : int = {} __lowercase : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , 
already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return ([0] * len(UpperCamelCase_ )) + [1] return ([0] * len(UpperCamelCase_ )) + [1] + ([0] * len(UpperCamelCase_ )) + [1] def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> List[int]: __lowercase : Union[str, Any] = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowerCamelCase ( self ) -> Union[str, Any]: return len(self.sp_model ) + self.fairseq_offset def _lowerCamelCase ( self ) -> int: __lowercase : Any = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCamelCase ( self , UpperCamelCase_ ) -> str: return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> Dict: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __lowercase : Optional[Any] = self.sp_model.PieceToId(UpperCamelCase_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _lowerCamelCase ( self , UpperCamelCase_ ) -> Union[str, Any]: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> str: __lowercase : Any = ''''''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ''' ''' ).strip() return out_string def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> Tuple[str]: if not os.path.isdir(UpperCamelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowercase : List[str] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if 
os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase_ , '''wb''' ) as fi: __lowercase : Tuple = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_ ) return (out_vocab_file,) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> List[int]: if token_ids_a is None: return token_ids_a + [self.sep_token_id] __lowercase : int = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
76
"""simple docstring""" import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor a_ = logging.get_logger(__name__) class UpperCAmelCase_ ( snake_case ): def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> None: warnings.warn( '''The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.''' ''' Please use LayoutLMv2ImageProcessor instead.''' , UpperCamelCase_ , ) super().__init__(*UpperCamelCase_ , **UpperCamelCase_ )
76
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging a_ = logging.get_logger(__name__) a_ = { 'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json', } class UpperCAmelCase_ ( snake_case ): UpperCamelCase ="lxmert" UpperCamelCase ={} def __init__( self , UpperCamelCase_=3_05_22 , UpperCamelCase_=7_68 , UpperCamelCase_=12 , UpperCamelCase_=95_00 , UpperCamelCase_=16_00 , UpperCamelCase_=4_00 , UpperCamelCase_=30_72 , UpperCamelCase_="gelu" , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=5_12 , UpperCamelCase_=2 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-12 , UpperCamelCase_=9 , UpperCamelCase_=5 , UpperCamelCase_=5 , UpperCamelCase_=20_48 , UpperCamelCase_=4 , UpperCamelCase_=6.6_7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , **UpperCamelCase_ , ) -> List[Any]: __lowercase : Optional[Any] = vocab_size __lowercase : Any = hidden_size __lowercase : Any = num_attention_heads __lowercase : List[Any] = hidden_act __lowercase : Optional[int] = intermediate_size __lowercase : str = hidden_dropout_prob __lowercase : Union[str, Any] = attention_probs_dropout_prob __lowercase : Tuple = max_position_embeddings __lowercase : Optional[Any] = type_vocab_size __lowercase : Optional[int] = initializer_range __lowercase : List[str] = layer_norm_eps __lowercase : str = num_qa_labels __lowercase : Optional[Any] = num_object_labels __lowercase : Dict = num_attr_labels __lowercase : Dict = l_layers __lowercase : str = x_layers __lowercase : List[Any] = r_layers __lowercase : List[Any] = visual_feat_dim __lowercase : Tuple = visual_pos_dim __lowercase : Union[str, Any] = visual_loss_normalizer __lowercase : str = task_matched __lowercase : Any = task_mask_lm __lowercase : List[str] = task_obj_predict __lowercase : Tuple = task_qa __lowercase : int = 
visual_obj_loss __lowercase : int = visual_attr_loss __lowercase : Optional[Any] = visual_feat_loss __lowercase : Union[str, Any] = {'''vision''': r_layers, '''cross_encoder''': x_layers, '''language''': l_layers} super().__init__(**UpperCamelCase_ )
76
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging a_ = logging.get_logger(__name__) a_ = '▁' a_ = {'vocab_file': 'sentencepiece.bpe.model'} a_ = { 'vocab_file': { 'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model', 'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model', 'xlm-roberta-large-finetuned-conll02-dutch': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll02-spanish': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll03-english': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll03-german': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model' ), } } a_ = { 'xlm-roberta-base': 5_1_2, 'xlm-roberta-large': 5_1_2, 'xlm-roberta-large-finetuned-conll02-dutch': 5_1_2, 'xlm-roberta-large-finetuned-conll02-spanish': 5_1_2, 'xlm-roberta-large-finetuned-conll03-english': 5_1_2, 'xlm-roberta-large-finetuned-conll03-german': 5_1_2, } class UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =["input_ids", "attention_mask"] def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> None: # Mask token behave like a normal word, i.e. 
include the space before it __lowercase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token __lowercase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) __lowercase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCamelCase_ ) ) __lowercase : str = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __lowercase : List[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __lowercase : Tuple = 1 __lowercase : Any = len(self.sp_model ) + self.fairseq_offset __lowercase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Optional[Any]: __lowercase : int = self.__dict__.copy() __lowercase : int = None __lowercase : Optional[Any] = self.sp_model.serialized_model_proto() return state def __setstate__( self , UpperCamelCase_ ) -> Tuple: __lowercase : List[str] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __lowercase : str = {} __lowercase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowercase : Dict = [self.cls_token_id] __lowercase : Union[str, Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> List[int]: __lowercase : Optional[Any] = [self.sep_token_id] __lowercase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return 
len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowerCamelCase ( self ) -> Dict: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def _lowerCamelCase ( self ) -> str: __lowercase : List[str] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[str]: return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> str: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __lowercase : Optional[Any] = self.sp_model.PieceToId(UpperCamelCase_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _lowerCamelCase ( self , UpperCamelCase_ ) -> Tuple: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> Dict: __lowercase : Tuple = ''''''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ''' ''' ).strip() return out_string def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> Tuple[str]: if not os.path.isdir(UpperCamelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowercase : List[Any] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase_ , '''wb''' ) as fi: __lowercase : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_ ) return (out_vocab_file,)
76
1
"""simple docstring""" from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. a_ = 2_0_0 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. a_ = 5_0 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. a_ = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1_0_0_0)) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): __lowercase : int = len([g for position, g in enumerate(__UpperCamelCase ) if g == main_target[position]] ) return (item, float(__UpperCamelCase )) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): __lowercase : int = random.randint(0 , len(__UpperCamelCase ) - 1 ) __lowercase : Optional[Any] = parent_a[:random_slice] + parent_a[random_slice:] __lowercase : Dict = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): __lowercase : Optional[int] = list(__UpperCamelCase ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: __lowercase : Optional[Any] = random.choice(__UpperCamelCase ) return "".join(__UpperCamelCase ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , ): __lowercase : Dict = [] # Generate more children proportionally to the fitness score. __lowercase : Tuple = int(parent_a[1] * 1_00 ) + 1 __lowercase : str = 10 if child_n >= 10 else child_n for _ in range(__UpperCamelCase ): __lowercase : Union[str, Any] = population_score[random.randint(0 , __UpperCamelCase )][0] __lowercase ,__lowercase : List[str] = crossover(parent_a[0] , __UpperCamelCase ) # Append new string to the population list. 
pop.append(mutate(__UpperCamelCase , __UpperCamelCase ) ) pop.append(mutate(__UpperCamelCase , __UpperCamelCase ) ) return pop def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = True ): # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: __lowercase : Optional[Any] = f"""{N_POPULATION} must be bigger than {N_SELECTED}""" raise ValueError(__UpperCamelCase ) # Verify that the target contains no genes besides the ones inside genes variable. __lowercase : Optional[int] = sorted({c for c in target if c not in genes} ) if not_in_genes_list: __lowercase : Union[str, Any] = f"""{not_in_genes_list} is not in genes list, evolution cannot converge""" raise ValueError(__UpperCamelCase ) # Generate random starting population. __lowercase : Dict = [] for _ in range(__UpperCamelCase ): population.append(''''''.join([random.choice(__UpperCamelCase ) for i in range(len(__UpperCamelCase ) )] ) ) # Just some logs to know what the algorithms is doing. __lowercase ,__lowercase : Tuple = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(__UpperCamelCase ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. __lowercase : str = [evaluate(__UpperCamelCase , __UpperCamelCase ) for item in population] # Check if there is a matching evolution. 
__lowercase : Union[str, Any] = sorted(__UpperCamelCase , key=lambda __UpperCamelCase : x[1] , reverse=__UpperCamelCase ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f"""\nGeneration: {generation}""" f"""\nTotal Population:{total_population}""" f"""\nBest score: {population_score[0][1]}""" f"""\nBest string: {population_score[0][0]}""" ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. __lowercase : str = population[: int(N_POPULATION / 3 )] population.clear() population.extend(__UpperCamelCase ) # Normalize population score to be between 0 and 1. __lowercase : List[Any] = [ (item, score / len(__UpperCamelCase )) for item, score in population_score ] # This is selection for i in range(__UpperCamelCase ): population.extend(select(population_score[int(__UpperCamelCase )] , __UpperCamelCase , __UpperCamelCase ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(__UpperCamelCase ) > N_POPULATION: break if __name__ == "__main__": a_ = ( 'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!' ) a_ = list( ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm' 'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\' ) a_ , a_ , a_ = basic(target_str, genes_list) print( F"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}" )
76
"""simple docstring""" import logging import os import quant_trainer import torch from torch.utils.data import DataLoader from transformers import Trainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput a_ = logging.getLogger(__name__) if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class UpperCAmelCase_ ( snake_case ): def __init__( self , *UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ ) -> Tuple: super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) __lowercase : Union[str, Any] = eval_examples __lowercase : Union[str, Any] = post_process_function __lowercase : Any = quant_trainer_args __lowercase : Optional[Any] = 1_28 # default number of calibration samples def _lowerCamelCase ( self , UpperCamelCase_=None ) -> Any: if calib_dataset is None and self.calib_dataset is None: raise ValueError('''Trainer: calibration requires an calib_dataset.''' ) __lowercase : Tuple = calib_dataset if calib_dataset is not None else self.calib_dataset __lowercase : str = self._remove_unused_columns(UpperCamelCase_ , description='''Calibration''' ) return DataLoader( UpperCamelCase_ , batch_size=self.args.eval_batch_size , collate_fn=self.data_collator , drop_last=self.args.dataloader_drop_last , num_workers=self.args.dataloader_num_workers , pin_memory=self.args.dataloader_pin_memory , shuffle=UpperCamelCase_ , ) def _lowerCamelCase ( self , UpperCamelCase_=None ) -> Any: __lowercase : Optional[int] = self.train_dataset if calib_dataset is None else calib_dataset __lowercase : List[Any] = self.get_calib_dataloader(UpperCamelCase_ ) __lowercase : Dict = self.model quant_trainer.configure_model(UpperCamelCase_ , self.quant_trainer_args , calib=UpperCamelCase_ ) model.eval() quant_trainer.enable_calibration(UpperCamelCase_ ) logger.info('''***** Running calibration *****''' ) logger.info(F""" Num examples = 
{self.calib_num}""" ) logger.info(F""" Batch size = {calib_dataloader.batch_size}""" ) for step, inputs in enumerate(UpperCamelCase_ ): # Prediction step __lowercase ,__lowercase ,__lowercase : Optional[Any] = self.prediction_step(UpperCamelCase_ , UpperCamelCase_ , prediction_loss_only=UpperCamelCase_ ) if (step + 1) * calib_dataloader.batch_size >= self.calib_num: break quant_trainer.finish_calibration(UpperCamelCase_ , self.quant_trainer_args ) __lowercase : Tuple = model def _lowerCamelCase ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_ = "eval" ) -> str: __lowercase : Tuple = self.eval_dataset if eval_dataset is None else eval_dataset __lowercase : Union[str, Any] = self.get_eval_dataloader(UpperCamelCase_ ) __lowercase : str = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. __lowercase : Optional[int] = self.compute_metrics __lowercase : Dict = None __lowercase : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __lowercase : Tuple = eval_loop( UpperCamelCase_ , description='''Evaluation''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , ) finally: __lowercase : List[str] = compute_metrics if self.post_process_function is not None and self.compute_metrics is not None: __lowercase : int = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , output.predictions ) __lowercase : Optional[int] = self.compute_metrics(UpperCamelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): __lowercase : List[str] = metrics.pop(UpperCamelCase_ ) self.log(UpperCamelCase_ ) else: __lowercase : Dict = {} if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report() ) __lowercase : int = self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCamelCase_ ) return metrics def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_ = "test" ) -> List[Any]: __lowercase : Optional[int] = self.get_test_dataloader(UpperCamelCase_ ) # Temporarily disable metric computation, we will do it in the loop here. __lowercase : str = self.compute_metrics __lowercase : Dict = None __lowercase : List[str] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: __lowercase : Union[str, Any] = eval_loop( UpperCamelCase_ , description='''Prediction''' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCamelCase_ , ) finally: __lowercase : Any = compute_metrics if self.post_process_function is None or self.compute_metrics is None: return output __lowercase : Dict = self.post_process_function(UpperCamelCase_ , UpperCamelCase_ , output.predictions , '''predict''' ) __lowercase : Optional[int] = self.compute_metrics(UpperCamelCase_ ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F"""{metric_key_prefix}_""" ): __lowercase : List[str] = metrics.pop(UpperCamelCase_ ) return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_="./" ) -> int: __lowercase : Optional[int] = self.eval_dataset __lowercase : Optional[int] = self.get_eval_dataloader(UpperCamelCase_ ) __lowercase : Any = next(iter(UpperCamelCase_ ) ) # saving device - to make it consistent __lowercase : Any = torch.device('''cuda''' if torch.cuda.is_available() else '''cpu''' ) # convert to tuple __lowercase : Tuple = tuple(v.to(UpperCamelCase_ ) for k, v in batch.items() ) logger.info('''Converting model to be onnx compatible''' ) from pytorch_quantization.nn 
import TensorQuantizer __lowercase : List[Any] = True __lowercase : int = self.model.to(UpperCamelCase_ ) model.eval() model.float() __lowercase : Optional[int] = model.module if hasattr(UpperCamelCase_ , '''module''' ) else model quant_trainer.configure_model(UpperCamelCase_ , self.quant_trainer_args ) __lowercase : Tuple = os.path.join(UpperCamelCase_ , '''model.onnx''' ) logger.info(F"""exporting model to {output_model_file}""" ) __lowercase : Tuple = {0: '''batch_size''', 1: '''seq_len'''} torch.onnx.export( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , export_params=UpperCamelCase_ , opset_version=13 , do_constant_folding=UpperCamelCase_ , input_names=['''input_ids''', '''attention_mask''', '''token_type_ids'''] , output_names=['''output_start_logits''', '''output_end_logits'''] , dynamic_axes={ '''input_ids''': axes, '''attention_mask''': axes, '''token_type_ids''': axes, '''output_start_logits''': axes, '''output_end_logits''': axes, } , verbose=UpperCamelCase_ , ) logger.info('''onnx export finished''' )
76
1
"""simple docstring""" from collections import deque from .hash_table import HashTable class UpperCAmelCase_ ( snake_case ): def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Any: super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ ) -> Optional[int]: __lowercase : Tuple = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(UpperCamelCase_ ) __lowercase : Tuple = self.values[key] def _lowerCamelCase ( self ) -> List[Any]: return ( sum(self.charge_factor - len(UpperCamelCase_ ) for slot in self.values ) / self.size_table * self.charge_factor ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_=None ) -> List[str]: if not ( len(self.values[key] ) == self.charge_factor and self.values.count(UpperCamelCase_ ) == 0 ): return key return super()._collision_resolution(UpperCamelCase_ , UpperCamelCase_ )
76
"""simple docstring""" import math import flax.linen as nn import jax.numpy as jnp def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase = 1 , __UpperCamelCase = 1 , __UpperCamelCase = 1.0e4 , __UpperCamelCase = False , __UpperCamelCase = 1.0 , ): assert timesteps.ndim == 1, "Timesteps should be a 1d-array" assert embedding_dim % 2 == 0, f"""Embedding dimension {embedding_dim} should be even""" __lowercase : Dict = float(embedding_dim // 2 ) __lowercase : Tuple = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift) __lowercase : List[Any] = min_timescale * jnp.exp(jnp.arange(__UpperCamelCase , dtype=jnp.floataa ) * -log_timescale_increment ) __lowercase : Any = jnp.expand_dims(__UpperCamelCase , 1 ) * jnp.expand_dims(__UpperCamelCase , 0 ) # scale embeddings __lowercase : Optional[int] = scale * emb if flip_sin_to_cos: __lowercase : Any = jnp.concatenate([jnp.cos(__UpperCamelCase ), jnp.sin(__UpperCamelCase )] , axis=1 ) else: __lowercase : List[str] = jnp.concatenate([jnp.sin(__UpperCamelCase ), jnp.cos(__UpperCamelCase )] , axis=1 ) __lowercase : int = jnp.reshape(__UpperCamelCase , [jnp.shape(__UpperCamelCase )[0], embedding_dim] ) return signal class UpperCAmelCase_ ( nn.Module ): UpperCamelCase =32 UpperCamelCase =jnp.floataa @nn.compact def __call__( self , UpperCamelCase_ ) -> Optional[int]: __lowercase : Union[str, Any] = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_1''' )(UpperCamelCase_ ) __lowercase : str = nn.silu(UpperCamelCase_ ) __lowercase : Dict = nn.Dense(self.time_embed_dim , dtype=self.dtype , name='''linear_2''' )(UpperCamelCase_ ) return temb class UpperCAmelCase_ ( nn.Module ): UpperCamelCase =32 UpperCamelCase =False UpperCamelCase =1 @nn.compact def __call__( self , UpperCamelCase_ ) -> Optional[int]: return get_sinusoidal_embeddings( UpperCamelCase_ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
76
1
"""simple docstring""" def __UpperCAmelCase ( __UpperCamelCase ): return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number if __name__ == "__main__": print('Program to check whether a number is a Perfect number or not...') a_ = int(input('Enter number: ').strip()) print(F"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
76
"""simple docstring""" import os import sys a_ = os.path.join(os.path.dirname(__file__), 'src') sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) a_ = [ 'torch', 'numpy', 'tokenizers', 'filelock', 'requests', 'tqdm', 'regex', 'sentencepiece', 'sacremoses', 'importlib_metadata', 'huggingface_hub', ] @add_start_docstrings(AutoConfig.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoConfig.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoTokenizer.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoTokenizer.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoModel.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoModel.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoModelForCausalLM.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoModelForMaskedLM.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoModelForSequenceClassification.from_pretrained(*__UpperCamelCase , **__UpperCamelCase ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): return AutoModelForQuestionAnswering.from_pretrained(*__UpperCamelCase , **__UpperCamelCase )
76
1
"""simple docstring""" import copy import random from transformers import CLIPTokenizer class UpperCAmelCase_ ( snake_case ): def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[Any]: super().__init__(*UpperCamelCase_ , **UpperCamelCase_ ) __lowercase : Dict = {} def _lowerCamelCase ( self , UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ ) -> Dict: __lowercase : List[Any] = super().add_tokens(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ ) if num_added_tokens == 0: raise ValueError( F"""The tokenizer already contains the token {placeholder_token}. Please pass a different""" ''' `placeholder_token` that is not already in the tokenizer.''' ) def _lowerCamelCase ( self , UpperCamelCase_ , *UpperCamelCase_ , UpperCamelCase_=1 , **UpperCamelCase_ ) -> Union[str, Any]: __lowercase : Optional[int] = [] if num_vec_per_token == 1: self.try_adding_tokens(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ ) output.append(UpperCamelCase_ ) else: __lowercase : Optional[int] = [] for i in range(UpperCamelCase_ ): __lowercase : List[Any] = placeholder_token + F"""_{i}""" self.try_adding_tokens(UpperCamelCase_ , *UpperCamelCase_ , **UpperCamelCase_ ) output.append(UpperCamelCase_ ) # handle cases where there is a new placeholder token that contains the current placeholder token but is larger for token in self.token_map: if token in placeholder_token: raise ValueError( F"""The tokenizer already has placeholder token {token} that can get confused with""" F""" {placeholder_token}keep placeholder tokens independent""" ) __lowercase : Optional[int] = output def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_=False , UpperCamelCase_=1.0 ) -> Optional[int]: if isinstance(UpperCamelCase_ , UpperCamelCase_ ): __lowercase : Tuple = [] for i in range(len(UpperCamelCase_ ) ): output.append(self.replace_placeholder_tokens_in_text(text[i] , vector_shuffle=UpperCamelCase_ ) ) return output for placeholder_token in self.token_map: if 
placeholder_token in text: __lowercase : List[Any] = self.token_map[placeholder_token] __lowercase : List[str] = tokens[: 1 + int(len(UpperCamelCase_ ) * prop_tokens_to_load )] if vector_shuffle: __lowercase : Optional[Any] = copy.copy(UpperCamelCase_ ) random.shuffle(UpperCamelCase_ ) __lowercase : Union[str, Any] = text.replace(UpperCamelCase_ , ''' '''.join(UpperCamelCase_ ) ) return text def __call__( self , UpperCamelCase_ , *UpperCamelCase_ , UpperCamelCase_=False , UpperCamelCase_=1.0 , **UpperCamelCase_ ) -> Optional[int]: return super().__call__( self.replace_placeholder_tokens_in_text( UpperCamelCase_ , vector_shuffle=UpperCamelCase_ , prop_tokens_to_load=UpperCamelCase_ ) , *UpperCamelCase_ , **UpperCamelCase_ , ) def _lowerCamelCase ( self , UpperCamelCase_ , *UpperCamelCase_ , UpperCamelCase_=False , UpperCamelCase_=1.0 , **UpperCamelCase_ ) -> Dict: return super().encode( self.replace_placeholder_tokens_in_text( UpperCamelCase_ , vector_shuffle=UpperCamelCase_ , prop_tokens_to_load=UpperCamelCase_ ) , *UpperCamelCase_ , **UpperCamelCase_ , )
76
"""simple docstring""" from math import pi, sqrt, tan def __UpperCAmelCase ( __UpperCamelCase ): if side_length < 0: raise ValueError('''surface_area_cube() only accepts non-negative values''' ) return 6 * side_length**2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if length < 0 or breadth < 0 or height < 0: raise ValueError('''surface_area_cuboid() only accepts non-negative values''' ) return 2 * ((length * breadth) + (breadth * height) + (length * height)) def __UpperCAmelCase ( __UpperCamelCase ): if radius < 0: raise ValueError('''surface_area_sphere() only accepts non-negative values''' ) return 4 * pi * radius**2 def __UpperCAmelCase ( __UpperCamelCase ): if radius < 0: raise ValueError('''surface_area_hemisphere() only accepts non-negative values''' ) return 3 * pi * radius**2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if radius < 0 or height < 0: raise ValueError('''surface_area_cone() only accepts non-negative values''' ) return pi * radius * (radius + (height**2 + radius**2) ** 0.5) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if radius_a < 0 or radius_a < 0 or height < 0: raise ValueError( '''surface_area_conical_frustum() only accepts non-negative values''' ) __lowercase : List[str] = (height**2 + (radius_a - radius_a) ** 2) ** 0.5 return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if radius < 0 or height < 0: raise ValueError('''surface_area_cylinder() only accepts non-negative values''' ) return 2 * pi * radius * (height + radius) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if torus_radius < 0 or tube_radius < 0: raise ValueError('''surface_area_torus() only accepts non-negative values''' ) if torus_radius < tube_radius: raise ValueError( '''surface_area_torus() does not support spindle or self intersecting tori''' ) return 4 * pow(__UpperCamelCase , 2 ) 
* torus_radius * tube_radius def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if length < 0 or width < 0: raise ValueError('''area_rectangle() only accepts non-negative values''' ) return length * width def __UpperCAmelCase ( __UpperCamelCase ): if side_length < 0: raise ValueError('''area_square() only accepts non-negative values''' ) return side_length**2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if base < 0 or height < 0: raise ValueError('''area_triangle() only accepts non-negative values''' ) return (base * height) / 2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if sidea < 0 or sidea < 0 or sidea < 0: raise ValueError('''area_triangle_three_sides() only accepts non-negative values''' ) elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea: raise ValueError('''Given three sides do not form a triangle''' ) __lowercase : int = (sidea + sidea + sidea) / 2 __lowercase : List[Any] = sqrt( semi_perimeter * (semi_perimeter - sidea) * (semi_perimeter - sidea) * (semi_perimeter - sidea) ) return area def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if base < 0 or height < 0: raise ValueError('''area_parallelogram() only accepts non-negative values''' ) return base * height def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): if basea < 0 or basea < 0 or height < 0: raise ValueError('''area_trapezium() only accepts non-negative values''' ) return 1 / 2 * (basea + basea) * height def __UpperCAmelCase ( __UpperCamelCase ): if radius < 0: raise ValueError('''area_circle() only accepts non-negative values''' ) return pi * radius**2 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if radius_x < 0 or radius_y < 0: raise ValueError('''area_ellipse() only accepts non-negative values''' ) return pi * radius_x * radius_y def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if diagonal_a < 0 or diagonal_a < 0: raise 
ValueError('''area_rhombus() only accepts non-negative values''' ) return 1 / 2 * diagonal_a * diagonal_a def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if not isinstance(__UpperCamelCase , __UpperCamelCase ) or sides < 3: raise ValueError( '''area_reg_polygon() only accepts integers greater than or \ equal to three as number of sides''' ) elif length < 0: raise ValueError( '''area_reg_polygon() only accepts non-negative values as \ length of a side''' ) return (sides * length**2) / (4 * tan(pi / sides )) return (sides * length**2) / (4 * tan(pi / sides )) if __name__ == "__main__": import doctest doctest.testmod(verbose=True) # verbose so we can see methods missing tests print('[DEMO] Areas of various geometric shapes: \n') print(F"Rectangle: {area_rectangle(1_0, 2_0) = }") print(F"Square: {area_square(1_0) = }") print(F"Triangle: {area_triangle(1_0, 1_0) = }") print(F"Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }") print(F"Parallelogram: {area_parallelogram(1_0, 2_0) = }") print(F"Rhombus: {area_rhombus(1_0, 2_0) = }") print(F"Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }") print(F"Circle: {area_circle(2_0) = }") print(F"Ellipse: {area_ellipse(1_0, 2_0) = }") print('\nSurface Areas of various geometric shapes: \n') print(F"Cube: {surface_area_cube(2_0) = }") print(F"Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }") print(F"Sphere: {surface_area_sphere(2_0) = }") print(F"Hemisphere: {surface_area_hemisphere(2_0) = }") print(F"Cone: {surface_area_cone(1_0, 2_0) = }") print(F"Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }") print(F"Cylinder: {surface_area_cylinder(1_0, 2_0) = }") print(F"Torus: {surface_area_torus(2_0, 1_0) = }") print(F"Equilateral Triangle: {area_reg_polygon(3, 1_0) = }") print(F"Square: {area_reg_polygon(4, 1_0) = }") print(F"Reqular Pentagon: {area_reg_polygon(5, 1_0) = }")
76
1
"""simple docstring""" a_ = 8.3144598 def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): if temperature < 0: raise Exception('''Temperature cannot be less than 0 K''' ) if molar_mass <= 0: raise Exception('''Molar mass cannot be less than or equal to 0 kg/mol''' ) else: return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5 if __name__ == "__main__": import doctest # run doctest doctest.testmod() # example a_ = 3_0_0 a_ = 2_8 a_ = rms_speed_of_molecule(temperature, molar_mass) print(F"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
76
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): # noqa: E741 while r - l > 1: __lowercase : int = (l + r) // 2 if v[m] >= key: __lowercase : Any = m else: __lowercase : List[Any] = m # noqa: E741 return r def __UpperCAmelCase ( __UpperCamelCase ): if len(__UpperCamelCase ) == 0: return 0 __lowercase : List[str] = [0] * len(__UpperCamelCase ) __lowercase : Any = 1 __lowercase : Dict = v[0] for i in range(1 , len(__UpperCamelCase ) ): if v[i] < tail[0]: __lowercase : Tuple = v[i] elif v[i] > tail[length - 1]: __lowercase : Optional[Any] = v[i] length += 1 else: __lowercase : Dict = v[i] return length if __name__ == "__main__": import doctest doctest.testmod()
76
1
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( __UpperCamelCase ): return [ord(__UpperCamelCase ) - 96 for elem in plain] def __UpperCAmelCase ( __UpperCamelCase ): return "".join(chr(elem + 96 ) for elem in encoded ) def __UpperCAmelCase ( ): __lowercase : Dict = encode(input('''-> ''' ).strip().lower() ) print('''Encoded: ''' , __UpperCamelCase ) print('''Decoded:''' , decode(__UpperCamelCase ) ) if __name__ == "__main__": main()
76
"""simple docstring""" from __future__ import annotations def __UpperCAmelCase ( __UpperCamelCase = 4 ): __lowercase : Dict = abs(__UpperCamelCase ) or 4 return [[1 + x + y * row_size for x in range(__UpperCamelCase )] for y in range(__UpperCamelCase )] def __UpperCAmelCase ( __UpperCamelCase ): return reverse_row(transpose(__UpperCamelCase ) ) # OR.. transpose(reverse_column(matrix)) def __UpperCAmelCase ( __UpperCamelCase ): return reverse_row(reverse_column(__UpperCamelCase ) ) # OR.. reverse_column(reverse_row(matrix)) def __UpperCAmelCase ( __UpperCamelCase ): return reverse_column(transpose(__UpperCamelCase ) ) # OR.. transpose(reverse_row(matrix)) def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Dict = [list(__UpperCamelCase ) for x in zip(*__UpperCamelCase )] return matrix def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Union[str, Any] = matrix[::-1] return matrix def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Dict = [x[::-1] for x in matrix] return matrix def __UpperCAmelCase ( __UpperCamelCase ): for i in matrix: print(*__UpperCamelCase ) if __name__ == "__main__": a_ = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 90 counterclockwise:\n') print_matrix(rotate_aa(matrix)) a_ = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 180:\n') print_matrix(rotate_aaa(matrix)) a_ = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 270 counterclockwise:\n') print_matrix(rotate_aaa(matrix))
76
1
"""simple docstring""" import argparse import os import re import numpy as np import PIL import torch from timm import create_model from torch.optim.lr_scheduler import OneCycleLR from torch.utils.data import DataLoader, Dataset from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor from accelerate import Accelerator def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Optional[Any] = fname.split(os.path.sep )[-1] return re.search(R'''^(.*)_\d+\.jpg$''' , __UpperCamelCase ).groups()[0] class UpperCAmelCase_ ( snake_case ): def __init__( self , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None ) -> int: __lowercase : Union[str, Any] = file_names __lowercase : int = image_transform __lowercase : int = label_to_id def __len__( self ) -> Dict: return len(self.file_names ) def __getitem__( self , UpperCamelCase_ ) -> List[Any]: __lowercase : Optional[Any] = self.file_names[idx] __lowercase : Tuple = PIL.Image.open(UpperCamelCase_ ) __lowercase : Any = raw_image.convert('''RGB''' ) if self.image_transform is not None: __lowercase : List[Any] = self.image_transform(UpperCamelCase_ ) __lowercase : int = extract_label(UpperCamelCase_ ) if self.label_to_id is not None: __lowercase : List[Any] = self.label_to_id[label] return {"image": image, "label": label} def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): # Initialize accelerator if args.with_tracking: __lowercase : int = Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='''all''' , project_dir=args.project_dir ) else: __lowercase : Dict = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs __lowercase : Any = config['''lr'''] __lowercase : Optional[Any] = int(config['''num_epochs'''] ) __lowercase : Union[str, Any] = int(config['''seed'''] ) __lowercase : Tuple = int(config['''batch_size'''] ) __lowercase : Union[str, Any] = config['''image_size'''] 
if not isinstance(__UpperCamelCase , (list, tuple) ): __lowercase : Any = (image_size, image_size) # Parse out whether we are saving every epoch or after a certain number of batches if hasattr(args.checkpointing_steps , '''isdigit''' ): if args.checkpointing_steps == "epoch": __lowercase : str = args.checkpointing_steps elif args.checkpointing_steps.isdigit(): __lowercase : Optional[int] = int(args.checkpointing_steps ) else: raise ValueError( f"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" ) else: __lowercase : Union[str, Any] = None # We need to initialize the trackers we use, and also store our configuration if args.with_tracking: __lowercase : int = os.path.split(__UpperCamelCase )[-1].split('''.''' )[0] accelerator.init_trackers(__UpperCamelCase , __UpperCamelCase ) # Grab all the image filenames __lowercase : Optional[int] = [os.path.join(args.data_dir , __UpperCamelCase ) for fname in os.listdir(args.data_dir ) if fname.endswith('''.jpg''' )] # Build the label correspondences __lowercase : List[Any] = [extract_label(__UpperCamelCase ) for fname in file_names] __lowercase : int = list(set(__UpperCamelCase ) ) id_to_label.sort() __lowercase : int = {lbl: i for i, lbl in enumerate(__UpperCamelCase )} # Set the seed before splitting the data. 
np.random.seed(__UpperCamelCase ) torch.manual_seed(__UpperCamelCase ) torch.cuda.manual_seed_all(__UpperCamelCase ) # Split our filenames between train and validation __lowercase : Tuple = np.random.permutation(len(__UpperCamelCase ) ) __lowercase : Dict = int(0.8 * len(__UpperCamelCase ) ) __lowercase : List[str] = random_perm[:cut] __lowercase : Tuple = random_perm[cut:] # For training we use a simple RandomResizedCrop __lowercase : str = Compose([RandomResizedCrop(__UpperCamelCase , scale=(0.5, 1.0) ), ToTensor()] ) __lowercase : Tuple = PetsDataset( [file_names[i] for i in train_split] , image_transform=__UpperCamelCase , label_to_id=__UpperCamelCase ) # For evaluation, we use a deterministic Resize __lowercase : Tuple = Compose([Resize(__UpperCamelCase ), ToTensor()] ) __lowercase : Union[str, Any] = PetsDataset([file_names[i] for i in eval_split] , image_transform=__UpperCamelCase , label_to_id=__UpperCamelCase ) # Instantiate dataloaders. __lowercase : Any = DataLoader(__UpperCamelCase , shuffle=__UpperCamelCase , batch_size=__UpperCamelCase , num_workers=4 ) __lowercase : Optional[int] = DataLoader(__UpperCamelCase , shuffle=__UpperCamelCase , batch_size=__UpperCamelCase , num_workers=4 ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) __lowercase : Optional[Any] = create_model('''resnet50d''' , pretrained=__UpperCamelCase , num_classes=len(__UpperCamelCase ) ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). 
__lowercase : List[str] = model.to(accelerator.device ) # Freezing the base model for param in model.parameters(): __lowercase : Optional[Any] = False for param in model.get_classifier().parameters(): __lowercase : Dict = True # We normalize the batches of images to be a bit faster. __lowercase : Optional[Any] = torch.tensor(model.default_cfg['''mean'''] )[None, :, None, None].to(accelerator.device ) __lowercase : int = torch.tensor(model.default_cfg['''std'''] )[None, :, None, None].to(accelerator.device ) # Instantiate optimizer __lowercase : Union[str, Any] = torch.optim.Adam(params=model.parameters() , lr=lr / 25 ) # Instantiate learning rate scheduler __lowercase : Optional[int] = OneCycleLR(optimizer=__UpperCamelCase , max_lr=__UpperCamelCase , epochs=__UpperCamelCase , steps_per_epoch=len(__UpperCamelCase ) ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. __lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase : Optional[Any] = accelerator.prepare( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # We need to keep track of how many total steps we have iterated over __lowercase : Optional[Any] = 0 # We also need to keep track of the starting epoch so files are named properly __lowercase : Optional[Any] = 0 # Potentially load in the weights and states from a previous save if args.resume_from_checkpoint: if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "": accelerator.print(f"""Resumed from checkpoint: {args.resume_from_checkpoint}""" ) accelerator.load_state(args.resume_from_checkpoint ) __lowercase : Optional[int] = os.path.basename(args.resume_from_checkpoint ) else: # Get the most recent checkpoint __lowercase : List[Any] = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()] dirs.sort(key=os.path.getctime ) __lowercase : Dict = dirs[-1] # Sorts folders by date 
modified, most recent checkpoint is the last # Extract `epoch_{i}` or `step_{i}` __lowercase : Union[str, Any] = os.path.splitext(__UpperCamelCase )[0] if "epoch" in training_difference: __lowercase : Any = int(training_difference.replace('''epoch_''' , '''''' ) ) + 1 __lowercase : Optional[int] = None else: __lowercase : Union[str, Any] = int(training_difference.replace('''step_''' , '''''' ) ) __lowercase : Dict = resume_step // len(__UpperCamelCase ) resume_step -= starting_epoch * len(__UpperCamelCase ) # Now we train the model for epoch in range(__UpperCamelCase , __UpperCamelCase ): model.train() if args.with_tracking: __lowercase : Optional[int] = 0 if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None: # We need to skip steps until we reach the resumed step __lowercase : str = accelerator.skip_first_batches(__UpperCamelCase , __UpperCamelCase ) overall_step += resume_step else: # After the first iteration though, we need to go back to the original dataloader __lowercase : Optional[Any] = train_dataloader for batch in active_dataloader: # We could avoid this line since we set the accelerator with `device_placement=True`. 
__lowercase : int = {k: v.to(accelerator.device ) for k, v in batch.items()} __lowercase : Dict = (batch['''image'''] - mean) / std __lowercase : List[str] = model(__UpperCamelCase ) __lowercase : str = torch.nn.functional.cross_entropy(__UpperCamelCase , batch['''label'''] ) # We keep track of the loss at each epoch if args.with_tracking: total_loss += loss.detach().float() accelerator.backward(__UpperCamelCase ) optimizer.step() lr_scheduler.step() optimizer.zero_grad() overall_step += 1 if isinstance(__UpperCamelCase , __UpperCamelCase ): __lowercase : Union[str, Any] = f"""step_{overall_step}""" if overall_step % checkpointing_steps == 0: if args.output_dir is not None: __lowercase : Optional[int] = os.path.join(args.output_dir , __UpperCamelCase ) accelerator.save_state(__UpperCamelCase ) model.eval() __lowercase : List[str] = 0 __lowercase : Tuple = 0 for step, batch in enumerate(__UpperCamelCase ): # We could avoid this line since we set the accelerator with `device_placement=True`. __lowercase : int = {k: v.to(accelerator.device ) for k, v in batch.items()} __lowercase : int = (batch['''image'''] - mean) / std with torch.no_grad(): __lowercase : Dict = model(__UpperCamelCase ) __lowercase : Optional[Any] = outputs.argmax(dim=-1 ) __lowercase ,__lowercase : List[str] = accelerator.gather_for_metrics((predictions, batch['''label''']) ) __lowercase : Optional[int] = predictions == references num_elems += accurate_preds.shape[0] accurate += accurate_preds.long().sum() __lowercase : Tuple = accurate.item() / num_elems # Use accelerator.print to print only on the main process. 
accelerator.print(f"""epoch {epoch}: {1_00 * eval_metric:.2f}""" ) if args.with_tracking: accelerator.log( { '''accuracy''': 1_00 * eval_metric, '''train_loss''': total_loss.item() / len(__UpperCamelCase ), '''epoch''': epoch, } , step=__UpperCamelCase , ) if checkpointing_steps == "epoch": __lowercase : Optional[Any] = f"""epoch_{epoch}""" if args.output_dir is not None: __lowercase : Optional[int] = os.path.join(args.output_dir , __UpperCamelCase ) accelerator.save_state(__UpperCamelCase ) if args.with_tracking: accelerator.end_training() def __UpperCAmelCase ( ): __lowercase : Any = argparse.ArgumentParser(description='''Simple example of training script.''' ) parser.add_argument('''--data_dir''' , required=__UpperCamelCase , help='''The data folder on disk.''' ) parser.add_argument('''--fp16''' , action='''store_true''' , help='''If passed, will use FP16 training.''' ) parser.add_argument( '''--mixed_precision''' , type=__UpperCamelCase , default=__UpperCamelCase , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose''' '''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.''' '''and an Nvidia Ampere GPU.''' , ) parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' ) parser.add_argument( '''--checkpointing_steps''' , type=__UpperCamelCase , default=__UpperCamelCase , help='''Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.''' , ) parser.add_argument( '''--output_dir''' , type=__UpperCamelCase , default='''.''' , help='''Optional save directory where all checkpoint folders will be stored. 
Default is the current working directory.''' , ) parser.add_argument( '''--resume_from_checkpoint''' , type=__UpperCamelCase , default=__UpperCamelCase , help='''If the training should continue from a checkpoint folder.''' , ) parser.add_argument( '''--with_tracking''' , action='''store_true''' , help='''Whether to load in all available experiment trackers from the environment and use them for logging.''' , ) parser.add_argument( '''--project_dir''' , type=__UpperCamelCase , default='''logs''' , help='''Location on where to store experiment tracking logs` and relevent project information''' , ) __lowercase : Optional[Any] = parser.parse_args() __lowercase : str = {'''lr''': 3e-2, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 64, '''image_size''': 2_24} training_function(__UpperCamelCase , __UpperCamelCase ) if __name__ == "__main__": main()
76
"""simple docstring""" import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer a_ = logging.get_logger(__name__) a_ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} a_ = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } a_ = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } a_ = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } a_ = { 'facebook/dpr-ctx_encoder-single-nq-base': 5_1_2, 'facebook/dpr-ctx_encoder-multiset-base': 5_1_2, } a_ = { 'facebook/dpr-question_encoder-single-nq-base': 5_1_2, 'facebook/dpr-question_encoder-multiset-base': 5_1_2, } a_ = { 'facebook/dpr-reader-single-nq-base': 5_1_2, 'facebook/dpr-reader-multiset-base': 5_1_2, } a_ = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } a_ = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } a_ = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION a_ = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text'] ) a_ = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) a_ = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the 
tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. 
This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(snake_case ) class UpperCAmelCase_ : def __call__( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> BatchEncoding: if titles is None and texts is None: return super().__call__( UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) elif titles is None or texts is None: __lowercase : int = titles if texts is None else texts return super().__call__( UpperCamelCase_ , UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ , return_attention_mask=UpperCamelCase_ , **UpperCamelCase_ , ) __lowercase : Optional[int] = titles if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [titles] __lowercase : Optional[int] = texts if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [texts] __lowercase : str = len(UpperCamelCase_ ) __lowercase : List[Any] = questions if not isinstance(UpperCamelCase_ , UpperCamelCase_ ) else [questions] * n_passages if len(UpperCamelCase_ ) != len(UpperCamelCase_ ): raise ValueError( F"""There should be as many titles than texts but got {len(UpperCamelCase_ )} titles and {len(UpperCamelCase_ )} texts.""" ) __lowercase : int = super().__call__(UpperCamelCase_ , UpperCamelCase_ , 
padding=UpperCamelCase_ , truncation=UpperCamelCase_ )['''input_ids'''] __lowercase : List[Any] = super().__call__(UpperCamelCase_ , add_special_tokens=UpperCamelCase_ , padding=UpperCamelCase_ , truncation=UpperCamelCase_ )['''input_ids'''] __lowercase : Optional[Any] = { '''input_ids''': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(UpperCamelCase_ , UpperCamelCase_ ) ] } if return_attention_mask is not False: __lowercase : str = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __lowercase : List[str] = attention_mask return self.pad(UpperCamelCase_ , padding=UpperCamelCase_ , max_length=UpperCamelCase_ , return_tensors=UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = 16 , UpperCamelCase_ = 64 , UpperCamelCase_ = 4 , ) -> List[DPRSpanPrediction]: __lowercase : List[Any] = reader_input['''input_ids'''] __lowercase ,__lowercase ,__lowercase : List[str] = reader_output[:3] __lowercase : Optional[int] = len(UpperCamelCase_ ) __lowercase : Any = sorted(range(UpperCamelCase_ ) , reverse=UpperCamelCase_ , key=relevance_logits.__getitem__ ) __lowercase : List[DPRReaderOutput] = [] for doc_id in sorted_docs: __lowercase : Any = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __lowercase : Tuple = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __lowercase : Optional[Any] = sequence_ids.index(self.pad_token_id ) else: __lowercase : List[Any] = len(UpperCamelCase_ ) __lowercase : List[str] = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCamelCase_ , 
top_spans=UpperCamelCase_ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCamelCase_ , start_index=UpperCamelCase_ , end_index=UpperCamelCase_ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(UpperCamelCase_ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , ) -> List[DPRSpanPrediction]: __lowercase : Tuple = [] for start_index, start_score in enumerate(UpperCamelCase_ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __lowercase : int = sorted(UpperCamelCase_ , key=lambda UpperCamelCase_ : x[1] , reverse=UpperCamelCase_ ) __lowercase : Optional[Any] = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(F"""Wrong span indices: [{start_index}:{end_index}]""" ) __lowercase : Any = end_index - start_index + 1 if length > max_answer_length: raise ValueError(F"""Span is too long: {length} > {max_answer_length}""" ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(UpperCamelCase_ ) == top_spans: break return chosen_span_intervals @add_end_docstrings(snake_case ) class UpperCAmelCase_ ( snake_case , snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =READER_PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase 
=READER_PRETRAINED_INIT_CONFIGURATION UpperCamelCase =["input_ids", "attention_mask"]
76
1
"""Tests for the BridgeTower image processor."""
import unittest
from typing import Dict, List, Optional, Union

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import BridgeTowerImageProcessor


class BridgeTowerImageProcessingTester(unittest.TestCase):
    """Holds the image-processor configuration under test and computes expected output sizes."""

    def __init__(
        self,
        parent,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        size_divisor: int = 32,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        do_center_crop: bool = True,
        image_mean: Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073],
        image_std: Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711],
        do_pad: bool = True,
        batch_size=7,
        min_resolution=30,
        max_resolution=400,
        num_channels=3,
    ):
        self.parent = parent
        self.do_resize = do_resize
        self.size = size if size is not None else {"shortest_edge": 288}
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.do_center_crop = do_center_crop
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution

    def prepare_image_processor_dict(self):
        """Kwargs used to instantiate the image processor under test."""
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
            "size_divisor": self.size_divisor,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Mirror the processor's resizing logic to predict the output (height, width).

        For a batch, each image is resized independently and the maximum
        height/width over the batch is returned (images are padded up to it).
        """
        if not batched:
            size = self.size["shortest_edge"]
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                # channel-first array: (num_channels, height, width)
                h, w = image.shape[1], image.shape[2]
            scale = size / min(w, h)
            if h < w:
                newh, neww = size, scale * w
            else:
                newh, neww = scale * h, size

            # cap the longer edge at (1333 / 800) * shortest_edge
            max_size = int((1333 / 800) * size)
            if max(newh, neww) > max_size:
                scale = max_size / max(newh, neww)
                newh = newh * scale
                neww = neww * scale

            newh, neww = int(newh + 0.5), int(neww + 0.5)
            expected_height, expected_width = (
                newh // self.size_divisor * self.size_divisor,
                neww // self.size_divisor * self.size_divisor,
            )
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class BridgeTowerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = BridgeTowerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image processor
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
76
"""Feature extractor class for GLPN (deprecated alias of GLPNImageProcessor)."""
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    """Deprecated backward-compatibility shim; behaves exactly like GLPNImageProcessor."""

    def __init__(self, *args, **kwargs) -> None:
        # Emit a FutureWarning on every instantiation so callers migrate
        # before the class is removed in v5.
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
76
1
"""Public re-exports for the utils package."""
# flake8: noqa
# Lint as: python3

# Declares the package's public API; the names match the imports below exactly.
__all__ = [
    "VerificationMode",
    "Version",
    "disable_progress_bar",
    "enable_progress_bar",
    "is_progress_bar_enabled",
    "experimental",
]

from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
76
"""Convert an original FLAVA checkpoint (plus DALL-E codebook) to the HuggingFace format."""
import argparse
import os

import torch

from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint


def count_parameters(state_dict):
    """Sum all parameter values in *state_dict* as a scalar tensor.

    encoder.embeddings are double copied in the original FLAVA checkpoint,
    so those keys are excluded to keep the before/after totals comparable.
    """
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())


def upgrade_state_dict(state_dict, codebook_state_dict):
    """Rename original FLAVA keys to the HF naming scheme and merge in the codebook weights."""
    upgrade = {}

    for key, value in state_dict.items():
        # text/image embeddings are duplicated in the original checkpoint; skip them
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade


@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    """Load the original checkpoint, remap its weights into FlavaForPreTraining and save it.

    ``checkpoint_path`` may be a local file or a URL (fetched via torch.hub).
    A parameter-sum check asserts that no weights were lost in the conversion.
    """
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to flava checkpoint")
    parser.add_argument("--codebook_path", default=None, type=str, help="Path to flava codebook checkpoint")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    args = parser.parse_args()

    convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
76
1
"""simple docstring""" from argparse import ArgumentParser, Namespace from typing import Any, List, Optional from ..pipelines import Pipeline, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand try: from fastapi import Body, FastAPI, HTTPException from fastapi.routing import APIRoute from pydantic import BaseModel from starlette.responses import JSONResponse from uvicorn import run a_ = True except (ImportError, AttributeError): a_ = object def __UpperCAmelCase ( *__UpperCamelCase , **__UpperCamelCase ): pass a_ = False a_ = logging.get_logger('transformers-cli/serving') def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Optional[Any] = pipeline( task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , ) return ServeCommand(__UpperCamelCase , args.host , args.port , args.workers ) class UpperCAmelCase_ ( snake_case ): UpperCamelCase =42 class UpperCAmelCase_ ( snake_case ): UpperCamelCase =42 UpperCamelCase =42 class UpperCAmelCase_ ( snake_case ): UpperCamelCase =42 class UpperCAmelCase_ ( snake_case ): UpperCamelCase =42 class UpperCAmelCase_ ( snake_case ): @staticmethod def _lowerCamelCase ( UpperCamelCase_ ) -> List[str]: __lowercase : Union[str, Any] = parser.add_parser( '''serve''' , help='''CLI tool to run inference requests through REST and GraphQL endpoints.''' ) serve_parser.add_argument( '''--task''' , type=UpperCamelCase_ , choices=get_supported_tasks() , help='''The task to run the pipeline on''' , ) serve_parser.add_argument('''--host''' , type=UpperCamelCase_ , default='''localhost''' , help='''Interface the server will listen on.''' ) serve_parser.add_argument('''--port''' , type=UpperCamelCase_ , default=88_88 , help='''Port the serving will listen to.''' ) serve_parser.add_argument('''--workers''' , type=UpperCamelCase_ , default=1 , help='''Number of http workers''' ) serve_parser.add_argument('''--model''' , 
type=UpperCamelCase_ , help='''Model\'s name or path to stored model.''' ) serve_parser.add_argument('''--config''' , type=UpperCamelCase_ , help='''Model\'s config name or path to stored model.''' ) serve_parser.add_argument('''--tokenizer''' , type=UpperCamelCase_ , help='''Tokenizer name to use.''' ) serve_parser.add_argument( '''--device''' , type=UpperCamelCase_ , default=-1 , help='''Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)''' , ) serve_parser.set_defaults(func=UpperCamelCase_ ) def __init__( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> List[str]: __lowercase : Dict = pipeline __lowercase : str = host __lowercase : Union[str, Any] = port __lowercase : Union[str, Any] = workers if not _serve_dependencies_installed: raise RuntimeError( '''Using serve command requires FastAPI and uvicorn. ''' '''Please install transformers with [serving]: pip install "transformers[serving]".''' '''Or install FastAPI and uvicorn separately.''' ) else: logger.info(F"""Serving model over {host}:{port}""" ) __lowercase : Tuple = FastAPI( routes=[ APIRoute( '''/''' , self.model_info , response_model=UpperCamelCase_ , response_class=UpperCamelCase_ , methods=['''GET'''] , ), APIRoute( '''/tokenize''' , self.tokenize , response_model=UpperCamelCase_ , response_class=UpperCamelCase_ , methods=['''POST'''] , ), APIRoute( '''/detokenize''' , self.detokenize , response_model=UpperCamelCase_ , response_class=UpperCamelCase_ , methods=['''POST'''] , ), APIRoute( '''/forward''' , self.forward , response_model=UpperCamelCase_ , response_class=UpperCamelCase_ , methods=['''POST'''] , ), ] , timeout=6_00 , ) def _lowerCamelCase ( self ) -> Tuple: run(self._app , host=self.host , port=self.port , workers=self.workers ) def _lowerCamelCase ( self ) -> Any: return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) ) def _lowerCamelCase ( self , UpperCamelCase_ = Body(UpperCamelCase_ , embed=UpperCamelCase_ ) 
, UpperCamelCase_ = Body(UpperCamelCase_ , embed=UpperCamelCase_ ) ) -> str: try: __lowercase : Optional[int] = self._pipeline.tokenizer.tokenize(UpperCamelCase_ ) if return_ids: __lowercase : Optional[int] = self._pipeline.tokenizer.convert_tokens_to_ids(UpperCamelCase_ ) return ServeTokenizeResult(tokens=UpperCamelCase_ , tokens_ids=UpperCamelCase_ ) else: return ServeTokenizeResult(tokens=UpperCamelCase_ ) except Exception as e: raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(UpperCamelCase_ )} ) def _lowerCamelCase ( self , UpperCamelCase_ = Body(UpperCamelCase_ , embed=UpperCamelCase_ ) , UpperCamelCase_ = Body(UpperCamelCase_ , embed=UpperCamelCase_ ) , UpperCamelCase_ = Body(UpperCamelCase_ , embed=UpperCamelCase_ ) , ) -> List[Any]: try: __lowercase : str = self._pipeline.tokenizer.decode(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) return ServeDeTokenizeResult(model='''''' , text=UpperCamelCase_ ) except Exception as e: raise HTTPException(status_code=5_00 , detail={'''model''': '''''', '''error''': str(UpperCamelCase_ )} ) async def _lowerCamelCase ( self , UpperCamelCase_=Body(UpperCamelCase_ , embed=UpperCamelCase_ ) ) -> Dict: # Check we don't have empty string if len(UpperCamelCase_ ) == 0: return ServeForwardResult(output=[] , attention=[] ) try: # Forward through the model __lowercase : int = self._pipeline(UpperCamelCase_ ) return ServeForwardResult(output=UpperCamelCase_ ) except Exception as e: raise HTTPException(5_00 , {'''error''': str(UpperCamelCase_ )} )
76
"""Image processor: shortest-edge resize, center crop, rescale and normalize."""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


# NOTE(review): the concrete model this processor belongs to is not visible
# here; the original (obfuscated) class name is kept to avoid breaking callers.
class UpperCAmelCase_(BaseImageProcessor):
    """Resizes the shortest edge to ``size``, optionally center-crops to ``crop_size``,
    rescales by ``rescale_factor`` and normalizes with ImageNet statistics by default."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize *image* so its shortest edge equals ``size["shortest_edge"]``,
        preserving the aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop *image* to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by *scale* (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize *image* channel-wise with *mean* and *std*."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        do_center_crop: Optional[bool] = None,
        crop_size: Optional[Dict[str, int]] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        """Apply the configured transforms to one image or a batch of images.

        Every flag/value argument defaults to the value set at construction time.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
76
1
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import AddedToken, PreTrainedTokenizer from ...utils import logging a_ = logging.get_logger(__name__) a_ = '▁' a_ = {'vocab_file': 'sentencepiece.bpe.model'} a_ = { 'vocab_file': { 'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model', 'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model', 'xlm-roberta-large-finetuned-conll02-dutch': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll02-spanish': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll03-english': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model' ), 'xlm-roberta-large-finetuned-conll03-german': ( 'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model' ), } } a_ = { 'xlm-roberta-base': 5_1_2, 'xlm-roberta-large': 5_1_2, 'xlm-roberta-large-finetuned-conll02-dutch': 5_1_2, 'xlm-roberta-large-finetuned-conll02-spanish': 5_1_2, 'xlm-roberta-large-finetuned-conll03-english': 5_1_2, 'xlm-roberta-large-finetuned-conll03-german': 5_1_2, } class UpperCAmelCase_ ( snake_case ): UpperCamelCase =VOCAB_FILES_NAMES UpperCamelCase =PRETRAINED_VOCAB_FILES_MAP UpperCamelCase =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES UpperCamelCase =["input_ids", "attention_mask"] def __init__( self , UpperCamelCase_ , UpperCamelCase_="<s>" , UpperCamelCase_="</s>" , UpperCamelCase_="</s>" , UpperCamelCase_="<s>" , UpperCamelCase_="<unk>" , UpperCamelCase_="<pad>" , UpperCamelCase_="<mask>" , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> None: # Mask token behave like a normal word, i.e. 
include the space before it __lowercase : List[Any] = AddedToken(UpperCamelCase_ , lstrip=UpperCamelCase_ , rstrip=UpperCamelCase_ ) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) else mask_token __lowercase : Dict = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCamelCase_ , eos_token=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase_ , ) __lowercase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCamelCase_ ) ) __lowercase : str = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # Mimic fairseq token-to-id alignment for the first 4 token __lowercase : List[Any] = {'''<s>''': 0, '''<pad>''': 1, '''</s>''': 2, '''<unk>''': 3} # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab __lowercase : Tuple = 1 __lowercase : Any = len(self.sp_model ) + self.fairseq_offset __lowercase : str = {v: k for k, v in self.fairseq_tokens_to_ids.items()} def __getstate__( self ) -> Optional[Any]: __lowercase : int = self.__dict__.copy() __lowercase : int = None __lowercase : Optional[Any] = self.sp_model.serialized_model_proto() return state def __setstate__( self , UpperCamelCase_ ) -> Tuple: __lowercase : List[str] = d # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __lowercase : str = {} __lowercase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.LoadFromSerializedProto(self.sp_model_proto ) def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> List[int]: if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] __lowercase : Dict = [self.cls_token_id] __lowercase : Union[str, Any] = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None , UpperCamelCase_ = False ) -> List[int]: if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase_ , token_ids_a=UpperCamelCase_ , already_has_special_tokens=UpperCamelCase_ ) if token_ids_a is None: return [1] + ([0] * len(UpperCamelCase_ )) + [1] return [1] + ([0] * len(UpperCamelCase_ )) + [1, 1] + ([0] * len(UpperCamelCase_ )) + [1] def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> List[int]: __lowercase : Optional[Any] = [self.sep_token_id] __lowercase : Optional[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return 
len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowerCamelCase ( self ) -> Dict: return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token def _lowerCamelCase ( self ) -> str: __lowercase : List[str] = {self.convert_ids_to_tokens(UpperCamelCase_ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowerCamelCase ( self , UpperCamelCase_ ) -> List[str]: return self.sp_model.encode(UpperCamelCase_ , out_type=UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> str: if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __lowercase : Optional[Any] = self.sp_model.PieceToId(UpperCamelCase_ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _lowerCamelCase ( self , UpperCamelCase_ ) -> Tuple: if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _lowerCamelCase ( self , UpperCamelCase_ ) -> Dict: __lowercase : Tuple = ''''''.join(UpperCamelCase_ ).replace(UpperCamelCase_ , ''' ''' ).strip() return out_string def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ = None ) -> Tuple[str]: if not os.path.isdir(UpperCamelCase_ ): logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowercase : List[Any] = os.path.join( UpperCamelCase_ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase_ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase_ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase_ , '''wb''' ) as fi: __lowercase : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase_ ) return (out_vocab_file,)
76
"""Isolate the fractional part of a number, optionally rounded."""


def decimal_isolate(number: float, digit_amount: int) -> float:
    """Return the decimal (fractional) part of *number*.

    When ``digit_amount > 0`` the result is rounded to that many digits;
    otherwise the raw fractional part is returned. The sign of the input
    is preserved (e.g. -14.789 -> -0.789).
    """
    if digit_amount > 0:
        return round(number - int(number), digit_amount)
    return number - int(number)


if __name__ == "__main__":
    print(decimal_isolate(1.53, 0))
    print(decimal_isolate(35.345, 1))
    print(decimal_isolate(35.345, 2))
    print(decimal_isolate(35.345, 3))
    print(decimal_isolate(-14.789, 3))
    print(decimal_isolate(0, 2))
    print(decimal_isolate(-14.123, 1))
    print(decimal_isolate(-14.123, 2))
    print(decimal_isolate(-14.123, 3))
76
1
"""Fast (Rust-backed) tokenization class for ConvBERT."""
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}


class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    """WordPiece-based fast tokenizer for ConvBERT (BERT-style special tokens)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        # Rebuild the backend normalizer if its saved options disagree with the
        # arguments requested here (e.g. a checkpoint saved with different casing).
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``[CLS] A [SEP]`` for one sequence, ``[CLS] A [SEP] B [SEP]`` for a pair."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token-type ids: 0 for the first sequence (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend tokenizer's vocabulary files into *save_directory*."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
76
"""Cycle detection in a directed graph via depth-first search."""


def check_cycle(graph: dict) -> bool:
    """Return True if the directed *graph* (adjacency dict) contains a cycle.

    >>> check_cycle({0: [1], 1: [2], 2: [0]})
    True
    >>> check_cycle({0: [1], 1: [2], 2: []})
    False
    """
    visited: set = set()
    # To detect a back edge, keep track of vertices currently in the recursion stack
    rec_stk: set = set()
    return any(
        node not in visited and depth_first_search(graph, node, visited, rec_stk)
        for node in graph
    )


def depth_first_search(graph: dict, vertex: int, visited: set, rec_stk: set) -> bool:
    """DFS from *vertex*; return True as soon as a back edge (cycle) is found."""
    visited.add(vertex)
    rec_stk.add(vertex)

    for node in graph[vertex]:
        if node not in visited:
            if depth_first_search(graph, node, visited, rec_stk):
                return True
        elif node in rec_stk:
            # edge into a vertex still on the recursion stack => cycle
            return True

    # The node needs to be removed from recursion stack before function ends
    rec_stk.remove(vertex)
    return False


if __name__ == "__main__":
    from doctest import testmod

    testmod()
76
1
"""simple docstring""" import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() a_ = logging.get_logger(__name__) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase=False ): __lowercase : str = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""blocks.{i}.norm1.weight""", f"""vit.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""blocks.{i}.norm1.bias""", f"""vit.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append((f"""blocks.{i}.attn.proj.weight""", f"""vit.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.attn.proj.bias""", f"""vit.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""blocks.{i}.norm2.weight""", f"""vit.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""blocks.{i}.norm2.bias""", f"""vit.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.weight""", f"""vit.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc1.bias""", f"""vit.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.weight""", f"""vit.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""blocks.{i}.mlp.fc2.bias""", f"""vit.encoder.layer.{i}.output.dense.bias""") ) # projection layer + position embeddings rename_keys.extend( [ ('''cls_token''', '''vit.embeddings.cls_token'''), ('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''), ('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''), 
('''pos_embed''', '''vit.embeddings.position_embeddings'''), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ('''norm.weight''', '''layernorm.weight'''), ('''norm.bias''', '''layernorm.bias'''), ('''pre_logits.fc.weight''', '''pooler.dense.weight'''), ('''pre_logits.fc.bias''', '''pooler.dense.bias'''), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" __lowercase : Tuple = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ('''norm.weight''', '''vit.layernorm.weight'''), ('''norm.bias''', '''vit.layernorm.bias'''), ('''head.weight''', '''classifier.weight'''), ('''head.bias''', '''classifier.bias'''), ] ) return rename_keys def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=False ): for i in range(config.num_hidden_layers ): if base_model: __lowercase : Optional[int] = '''''' else: __lowercase : Any = '''vit.''' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __lowercase : str = state_dict.pop(f"""blocks.{i}.attn.qkv.weight""" ) __lowercase : Tuple = state_dict.pop(f"""blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict __lowercase : Optional[int] = in_proj_weight[ : config.hidden_size, : ] __lowercase : int = in_proj_bias[: config.hidden_size] __lowercase : int = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __lowercase : int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __lowercase : Tuple = in_proj_weight[ -config.hidden_size :, : ] __lowercase : str = in_proj_bias[-config.hidden_size :] def __UpperCAmelCase ( __UpperCamelCase ): __lowercase : Optional[int] = ['''head.weight''', '''head.bias'''] for k in ignore_keys: state_dict.pop(__UpperCamelCase , __UpperCamelCase ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): 
__lowercase : Any = dct.pop(__UpperCamelCase ) __lowercase : Optional[Any] = val def __UpperCAmelCase ( ): __lowercase : Any = '''http://images.cocodataset.org/val2017/000000039769.jpg''' __lowercase : Any = Image.open(requests.get(__UpperCamelCase , stream=__UpperCamelCase ).raw ) return im @torch.no_grad() def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase ): __lowercase : Union[str, Any] = ViTConfig() __lowercase : Optional[Any] = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": __lowercase : Tuple = True __lowercase : int = int(vit_name[-12:-10] ) __lowercase : str = int(vit_name[-9:-6] ) else: __lowercase : Union[str, Any] = 10_00 __lowercase : Any = '''huggingface/label-files''' __lowercase : str = '''imagenet-1k-id2label.json''' __lowercase : int = json.load(open(hf_hub_download(__UpperCamelCase , __UpperCamelCase , repo_type='''dataset''' ) , '''r''' ) ) __lowercase : Any = {int(__UpperCamelCase ): v for k, v in idalabel.items()} __lowercase : str = idalabel __lowercase : Optional[Any] = {v: k for k, v in idalabel.items()} __lowercase : Dict = int(vit_name[-6:-4] ) __lowercase : List[Any] = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith('''tiny''' ): __lowercase : Union[str, Any] = 1_92 __lowercase : int = 7_68 __lowercase : Optional[Any] = 12 __lowercase : Optional[Any] = 3 elif vit_name[9:].startswith('''small''' ): __lowercase : Optional[int] = 3_84 __lowercase : List[Any] = 15_36 __lowercase : Optional[Any] = 12 __lowercase : str = 6 else: pass else: if vit_name[4:].startswith('''small''' ): __lowercase : Dict = 7_68 __lowercase : List[str] = 23_04 __lowercase : Union[str, Any] = 8 __lowercase : Dict = 8 elif vit_name[4:].startswith('''base''' ): pass elif vit_name[4:].startswith('''large''' ): __lowercase : Optional[int] = 10_24 __lowercase : Union[str, Any] = 40_96 __lowercase : Union[str, Any] = 24 __lowercase : 
Tuple = 16 elif vit_name[4:].startswith('''huge''' ): __lowercase : str = 12_80 __lowercase : Tuple = 51_20 __lowercase : Union[str, Any] = 32 __lowercase : Any = 16 # load original model from timm __lowercase : Union[str, Any] = timm.create_model(__UpperCamelCase , pretrained=__UpperCamelCase ) timm_model.eval() # load state_dict of original model, remove and rename some keys __lowercase : Optional[int] = timm_model.state_dict() if base_model: remove_classification_head_(__UpperCamelCase ) __lowercase : int = create_rename_keys(__UpperCamelCase , __UpperCamelCase ) for src, dest in rename_keys: rename_key(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) read_in_q_k_v(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) # load HuggingFace model if vit_name[-5:] == "in21k": __lowercase : Dict = ViTModel(__UpperCamelCase ).eval() else: __lowercase : Any = ViTForImageClassification(__UpperCamelCase ).eval() model.load_state_dict(__UpperCamelCase ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: __lowercase : Optional[int] = DeiTImageProcessor(size=config.image_size ) else: __lowercase : int = ViTImageProcessor(size=config.image_size ) __lowercase : str = image_processor(images=prepare_img() , return_tensors='''pt''' ) __lowercase : str = encoding['''pixel_values'''] __lowercase : Dict = model(__UpperCamelCase ) if base_model: __lowercase : Tuple = timm_model.forward_features(__UpperCamelCase ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(__UpperCamelCase , outputs.pooler_output , atol=1e-3 ) else: __lowercase : Any = timm_model(__UpperCamelCase ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(__UpperCamelCase , outputs.logits , atol=1e-3 ) Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase ) print(f"""Saving model {vit_name} to {pytorch_dump_folder_path}""" ) model.save_pretrained(__UpperCamelCase ) print(f"""Saving image processor to 
{pytorch_dump_folder_path}""" ) image_processor.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '--vit_name', default='vit_base_patch16_224', type=str, help='Name of the ViT timm model you\'d like to convert.', ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) a_ = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
76
"""simple docstring""" import logging import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEncoder, BertModel, BertPreTrainedModel, ) a_ = logging.getLogger(__name__) class UpperCAmelCase_ ( snake_case ): def _lowerCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_=None , UpperCamelCase_=None ) -> Optional[Any]: __lowercase : Tuple = self.layer[current_layer](UpperCamelCase_ , UpperCamelCase_ , head_mask[current_layer] ) __lowercase : Any = layer_outputs[0] return hidden_states @add_start_docstrings( "The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top." , snake_case , ) class UpperCAmelCase_ ( snake_case ): def __init__( self , UpperCamelCase_ ) -> int: super().__init__(UpperCamelCase_ ) __lowercase : Optional[Any] = BertEncoderWithPabee(UpperCamelCase_ ) self.init_weights() __lowercase : str = 0 __lowercase : Optional[Any] = 0 __lowercase : Optional[int] = 0 __lowercase : int = 0 def _lowerCamelCase ( self , UpperCamelCase_ ) -> Dict: __lowercase : Tuple = threshold def _lowerCamelCase ( self , UpperCamelCase_ ) -> Union[str, Any]: __lowercase : Optional[int] = patience def _lowerCamelCase ( self ) -> List[str]: __lowercase : Tuple = 0 __lowercase : Tuple = 0 def _lowerCamelCase ( self ) -> List[Any]: __lowercase : Optional[int] = self.inference_layers_num / self.inference_instances_num __lowercase : int = ( F"""*** Patience = {self.patience} Avg. 
Inference Layers = {avg_inf_layers:.2f} Speed Up =""" F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***""" ) print(UpperCamelCase_ ) @add_start_docstrings_to_model_forward(UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=False , ) -> Union[str, Any]: if input_ids is not None and inputs_embeds is not None: raise ValueError('''You cannot specify both input_ids and inputs_embeds at the same time''' ) elif input_ids is not None: __lowercase : Tuple = input_ids.size() elif inputs_embeds is not None: __lowercase : List[Any] = inputs_embeds.size()[:-1] else: raise ValueError('''You have to specify either input_ids or inputs_embeds''' ) __lowercase : int = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: __lowercase : Dict = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ ) if token_type_ids is None: __lowercase : int = torch.zeros(UpperCamelCase_ , dtype=torch.long , device=UpperCamelCase_ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
__lowercase : torch.Tensor = self.get_extended_attention_mask(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: __lowercase ,__lowercase ,__lowercase : Optional[int] = encoder_hidden_states.size() __lowercase : Any = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: __lowercase : List[str] = torch.ones(UpperCamelCase_ , device=UpperCamelCase_ ) __lowercase : Tuple = self.invert_attention_mask(UpperCamelCase_ ) else: __lowercase : Tuple = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] __lowercase : Optional[int] = self.get_head_mask(UpperCamelCase_ , self.config.num_hidden_layers ) __lowercase : Optional[int] = self.embeddings( input_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ ) __lowercase : Union[str, Any] = embedding_output if self.training: __lowercase : List[Any] = [] for i in range(self.config.num_hidden_layers ): __lowercase : str = self.encoder.adaptive_forward( UpperCamelCase_ , current_layer=UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ ) __lowercase : int = self.pooler(UpperCamelCase_ ) __lowercase : str = output_layers[i](output_dropout(UpperCamelCase_ ) ) res.append(UpperCamelCase_ ) elif self.patience == 0: # Use all layers for inference __lowercase : int = self.encoder( UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ , encoder_hidden_states=UpperCamelCase_ , encoder_attention_mask=UpperCamelCase_ , ) __lowercase : 
Optional[Any] = self.pooler(encoder_outputs[0] ) __lowercase : int = [output_layers[self.config.num_hidden_layers - 1](UpperCamelCase_ )] else: __lowercase : Optional[int] = 0 __lowercase : Union[str, Any] = None __lowercase : int = 0 for i in range(self.config.num_hidden_layers ): calculated_layer_num += 1 __lowercase : Tuple = self.encoder.adaptive_forward( UpperCamelCase_ , current_layer=UpperCamelCase_ , attention_mask=UpperCamelCase_ , head_mask=UpperCamelCase_ ) __lowercase : Dict = self.pooler(UpperCamelCase_ ) __lowercase : Optional[int] = output_layers[i](UpperCamelCase_ ) if regression: __lowercase : Any = logits.detach() if patient_result is not None: __lowercase : List[str] = patient_result.detach() if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold: patient_counter += 1 else: __lowercase : int = 0 else: __lowercase : List[str] = logits.detach().argmax(dim=1 ) if patient_result is not None: __lowercase : Optional[Any] = patient_result.detach().argmax(dim=1 ) if (patient_result is not None) and torch.all(labels.eq(UpperCamelCase_ ) ): patient_counter += 1 else: __lowercase : Tuple = 0 __lowercase : Union[str, Any] = logits if patient_counter == self.patience: break __lowercase : Optional[int] = [patient_result] self.inference_layers_num += calculated_layer_num self.inference_instances_num += 1 return res @add_start_docstrings( "Bert Model transformer with PABEE and a sequence classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. 
" , snake_case , ) class UpperCAmelCase_ ( snake_case ): def __init__( self , UpperCamelCase_ ) -> Optional[Any]: super().__init__(UpperCamelCase_ ) __lowercase : List[Any] = config.num_labels __lowercase : int = BertModelWithPabee(UpperCamelCase_ ) __lowercase : int = nn.Dropout(config.hidden_dropout_prob ) __lowercase : Union[str, Any] = nn.ModuleList( [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] ) self.init_weights() @add_start_docstrings_to_model_forward(UpperCamelCase_ ) def _lowerCamelCase ( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=None , ) -> int: __lowercase : Union[str, Any] = self.bert( input_ids=UpperCamelCase_ , attention_mask=UpperCamelCase_ , token_type_ids=UpperCamelCase_ , position_ids=UpperCamelCase_ , head_mask=UpperCamelCase_ , inputs_embeds=UpperCamelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , ) __lowercase : List[str] = (logits[-1],) if labels is not None: __lowercase : Any = None __lowercase : Optional[int] = 0 for ix, logits_item in enumerate(UpperCamelCase_ ): if self.num_labels == 1: # We are doing regression __lowercase : Any = MSELoss() __lowercase : Any = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) ) else: __lowercase : str = CrossEntropyLoss() __lowercase : Dict = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) ) if total_loss is None: __lowercase : List[str] = loss else: total_loss += loss * (ix + 1) total_weights += ix + 1 __lowercase : Union[str, Any] = (total_loss / total_weights,) + outputs return outputs
76
1
"""simple docstring""" from __future__ import annotations a_ = [True] * 1_0_0_0_0_0_1 a_ = 2 while i * i <= 1_0_0_0_0_0_0: if seive[i]: for j in range(i * i, 1_0_0_0_0_0_1, i): a_ = False i += 1 def __UpperCAmelCase ( __UpperCamelCase ): return seive[n] def __UpperCAmelCase ( __UpperCamelCase ): return any(digit in '''02468''' for digit in str(__UpperCamelCase ) ) def __UpperCAmelCase ( __UpperCamelCase = 1_00_00_00 ): __lowercase : int = [2] # result already includes the number 2. for num in range(3 , limit + 1 , 2 ): if is_prime(__UpperCamelCase ) and not contains_an_even_digit(__UpperCamelCase ): __lowercase : Dict = str(__UpperCamelCase ) __lowercase : Dict = [int(str_num[j:] + str_num[:j] ) for j in range(len(__UpperCamelCase ) )] if all(is_prime(__UpperCamelCase ) for i in list_nums ): result.append(__UpperCamelCase ) return result def __UpperCAmelCase ( ): return len(find_circular_primes() ) if __name__ == "__main__": print(F"{len(find_circular_primes()) = }")
76
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() a_ = logging.get_logger(__name__) a_ = { 'post_extract_proj': 'feature_projection.projection', 'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv', 'self_attn.k_proj': 'encoder.layers.*.attention.k_proj', 'self_attn.v_proj': 'encoder.layers.*.attention.v_proj', 'self_attn.q_proj': 'encoder.layers.*.attention.q_proj', 'self_attn.out_proj': 'encoder.layers.*.attention.out_proj', 'self_attn_layer_norm': 'encoder.layers.*.layer_norm', 'fc1': 'encoder.layers.*.feed_forward.intermediate_dense', 'fc2': 'encoder.layers.*.feed_forward.output_dense', 'final_layer_norm': 'encoder.layers.*.final_layer_norm', 'encoder.layer_norm': 'encoder.layer_norm', 'w2v_model.layer_norm': 'feature_projection.layer_norm', 'w2v_encoder.proj': 'lm_head', 'mask_emb': 'masked_spec_embed', } def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): for attribute in key.split('''.''' ): __lowercase : str = getattr(__UpperCamelCase , __UpperCamelCase ) if weight_type is not None: __lowercase : int = getattr(__UpperCamelCase , __UpperCamelCase ).shape else: __lowercase : int = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": __lowercase : List[str] = value elif weight_type == "weight_g": __lowercase : Optional[Any] = value elif weight_type == "weight_v": __lowercase : Tuple = value elif weight_type == "bias": __lowercase : Dict = value else: __lowercase : Union[str, Any] = value logger.info(f"""{key + "." 
+ weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : Tuple = [] __lowercase : Union[str, Any] = fairseq_model.state_dict() __lowercase : Optional[Any] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): __lowercase : Union[str, Any] = False if "conv_layers" in name: load_conv_layer( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , hf_model.config.feat_extract_norm == '''group''' , ) __lowercase : List[str] = True else: for key, mapped_key in MAPPING.items(): __lowercase : List[str] = '''hubert.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key if key in name or (key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0] and not is_finetuned): __lowercase : int = True if "*" in mapped_key: __lowercase : Union[str, Any] = name.split(__UpperCamelCase )[0].split('''.''' )[-2] __lowercase : Tuple = mapped_key.replace('''*''' , __UpperCamelCase ) if "weight_g" in name: __lowercase : Tuple = '''weight_g''' elif "weight_v" in name: __lowercase : Optional[int] = '''weight_v''' elif "weight" in name: __lowercase : str = '''weight''' elif "bias" in name: __lowercase : Optional[int] = '''bias''' else: __lowercase : List[str] = None set_recursively(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) continue if not is_used: unused_weights.append(__UpperCamelCase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase ): __lowercase : List[Any] = full_name.split('''conv_layers.''' )[-1] __lowercase : str = name.split('''.''' ) __lowercase : Dict = int(items[0] ) __lowercase : Any = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == 
feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) __lowercase : List[str] = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) __lowercase : Tuple = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) __lowercase : Union[str, Any] = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) __lowercase : Tuple = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(__UpperCamelCase ) @torch.no_grad() def __UpperCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=True ): if config_path is not None: __lowercase : Dict = HubertConfig.from_pretrained(__UpperCamelCase ) else: __lowercase : str = HubertConfig() if is_finetuned: if dict_path: __lowercase : Tuple = Dictionary.load(__UpperCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __lowercase : int = target_dict.pad_index __lowercase : Union[str, Any] = target_dict.bos_index __lowercase : int = target_dict.eos_index __lowercase : int = len(target_dict.symbols ) __lowercase : Dict = os.path.join(__UpperCamelCase , '''vocab.json''' ) if not os.path.isdir(__UpperCamelCase ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(__UpperCamelCase ) ) return os.makedirs(__UpperCamelCase , exist_ok=__UpperCamelCase ) with open(__UpperCamelCase , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(target_dict.indices , __UpperCamelCase ) __lowercase : str = WavaVecaCTCTokenizer( __UpperCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=__UpperCamelCase , ) __lowercase : str = True if config.feat_extract_norm == '''layer''' else False __lowercase : Any = 
WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=__UpperCamelCase , return_attention_mask=__UpperCamelCase , ) __lowercase : Union[str, Any] = WavaVecaProcessor(feature_extractor=__UpperCamelCase , tokenizer=__UpperCamelCase ) processor.save_pretrained(__UpperCamelCase ) __lowercase : Optional[Any] = HubertForCTC(__UpperCamelCase ) else: __lowercase : Union[str, Any] = HubertModel(__UpperCamelCase ) if is_finetuned: __lowercase ,__lowercase ,__lowercase : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __lowercase ,__lowercase ,__lowercase : Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) __lowercase : Union[str, Any] = model[0].eval() recursively_load_weights(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ) hf_wavavec.save_pretrained(__UpperCamelCase ) if __name__ == "__main__": a_ = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint') parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') parser.add_argument( '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not' ) a_ = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned )
76
1
"""simple docstring""" from ..utils import DummyObject, requires_backends class UpperCAmelCase_ ( metaclass=snake_case ): UpperCamelCase =["torch", "scipy"] def __init__( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> Any: requires_backends(self , ['''torch''', '''scipy'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> Union[str, Any]: requires_backends(cls , ['''torch''', '''scipy'''] ) @classmethod def _lowerCamelCase ( cls , *UpperCamelCase_ , **UpperCamelCase_ ) -> Dict: requires_backends(cls , ['''torch''', '''scipy'''] )
76
"""simple docstring""" a_ = { 'Pillow': 'Pillow<10.0.0', 'accelerate': 'accelerate>=0.20.3', 'av': 'av==9.2.0', 'beautifulsoup4': 'beautifulsoup4', 'black': 'black~=23.1', 'codecarbon': 'codecarbon==1.2.0', 'cookiecutter': 'cookiecutter==1.7.3', 'dataclasses': 'dataclasses', 'datasets': 'datasets!=2.5.0', 'decord': 'decord==0.6.0', 'deepspeed': 'deepspeed>=0.9.3', 'diffusers': 'diffusers', 'dill': 'dill<0.3.5', 'evaluate': 'evaluate>=0.2.0', 'fairscale': 'fairscale>0.3', 'faiss-cpu': 'faiss-cpu', 'fastapi': 'fastapi', 'filelock': 'filelock', 'flax': 'flax>=0.4.1,<=0.7.0', 'ftfy': 'ftfy', 'fugashi': 'fugashi>=1.0', 'GitPython': 'GitPython<3.1.19', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.14.1,<1.0', 'importlib_metadata': 'importlib_metadata', 'ipadic': 'ipadic>=1.0.0,<2.0', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2,<=0.4.13', 'jaxlib': 'jaxlib>=0.1.65,<=0.4.13', 'jieba': 'jieba', 'kenlm': 'kenlm', 'keras-nlp': 'keras-nlp>=0.3.1', 'librosa': 'librosa', 'nltk': 'nltk', 'natten': 'natten>=0.14.6', 'numpy': 'numpy>=1.17', 'onnxconverter-common': 'onnxconverter-common', 'onnxruntime-tools': 'onnxruntime-tools>=1.4.2', 'onnxruntime': 'onnxruntime>=1.4.0', 'opencv-python': 'opencv-python', 'optuna': 'optuna', 'optax': 'optax>=0.0.8,<=0.1.4', 'packaging': 'packaging>=20.0', 'parameterized': 'parameterized', 'phonemizer': 'phonemizer', 'protobuf': 'protobuf', 'psutil': 'psutil', 'pyyaml': 'pyyaml>=5.1', 'pydantic': 'pydantic<2', 'pytest': 'pytest>=7.2.0', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'python': 'python>=3.8.0', 'ray[tune]': 'ray[tune]', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'rhoknp': 'rhoknp>=1.1.0,<1.3.1', 'rjieba': 'rjieba', 'rouge-score': 'rouge-score!=0.0.7,!=0.0.8,!=0.1,!=0.1.1', 'ruff': 'ruff>=0.0.241,<=0.0.259', 'sacrebleu': 'sacrebleu>=1.4.12,<2.0.0', 'sacremoses': 'sacremoses', 'safetensors': 'safetensors>=0.3.1', 'sagemaker': 'sagemaker>=2.31.0', 'scikit-learn': 
'scikit-learn', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'sigopt': 'sigopt', 'starlette': 'starlette', 'sudachipy': 'sudachipy>=0.6.6', 'sudachidict_core': 'sudachidict_core>=20220729', 'tensorflow-cpu': 'tensorflow-cpu>=2.6,<2.14', 'tensorflow': 'tensorflow>=2.6,<2.14', 'tensorflow-text': 'tensorflow-text<2.14', 'tf2onnx': 'tf2onnx', 'timeout-decorator': 'timeout-decorator', 'timm': 'timm', 'tokenizers': 'tokenizers>=0.11.1,!=0.11.3,<0.14', 'torch': 'torch>=1.9,!=1.12.0', 'torchaudio': 'torchaudio', 'torchvision': 'torchvision', 'pyctcdecode': 'pyctcdecode>=0.4.0', 'tqdm': 'tqdm>=4.27', 'unidic': 'unidic>=1.0.2', 'unidic_lite': 'unidic_lite>=1.0.7', 'urllib3': 'urllib3<2.0.0', 'uvicorn': 'uvicorn', }
76
1