code
stringlengths
81
54k
code_codestyle
int64
0
721
style_context
stringlengths
91
41.9k
style_context_codestyle
int64
0
699
label
int64
0
1
def remove_duplicates(key: str) -> str:
    """Return *key* with duplicate letters removed, preserving first-seen order.

    Spaces are kept; any non-alphabetic, non-space character is dropped.
    """
    key_no_dups = ""
    for ch in key:
        # Keep a space, or the first occurrence of each letter.
        if ch == " " or (ch not in key_no_dups and ch.isalpha()):
            key_no_dups += ch
    return key_no_dups


def create_cipher_map(key: str) -> dict[str, str]:
    """Build a monoalphabetic substitution map seeded by *key*.

    The deduplicated, uppercased keyword fills the first positions of the
    cipher alphabet; the remaining plain letters are then mapped onto the
    unused letters of the alphabet in order.
    """
    alphabet = [chr(i + 65) for i in range(26)]
    # Remove duplicate characters from key.
    key = remove_duplicates(key.upper())
    offset = len(key)
    # First fill cipher with key characters.
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key)}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning.
    for i in range(len(cipher_alphabet), 26):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped.
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher(message: str, cipher_map: dict[str, str]) -> str:
    """Encipher *message* (uppercased) with *cipher_map*; unmapped chars pass through."""
    return "".join(cipher_map.get(ch, ch) for ch in message.upper())


def decipher(message: str, cipher_map: dict[str, str]) -> str:
    """Decipher *message* by inverting *cipher_map*; unmapped chars pass through."""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch, ch) for ch in message.upper())


def main() -> None:
    """Interactively encipher or decipher a message with a keyword cipher."""
    message = input("Enter message to encode or decode: ").strip()
    key = input("Enter keyword: ").strip()
    option = input("Encipher or decipher? E/D:").strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option")
    cipher_map = create_cipher_map(key)
    print(func(message, cipher_map))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
76
def reverse_long_words(sentence: str) -> str:
    """Reverse every word longer than four characters; leave short words alone.

    >>> reverse_long_words('Hey wollef sroirraw')
    'Hey fellow warriors'
    """
    return " ".join(
        word[::-1] if len(word) > 4 else word for word in sentence.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words("Hey wollef sroirraw"))
76
1
import numpy


class TwoHiddenLayerNeuralNetwork:
    """A tiny fully-connected network: input -> 4 nodes -> 3 nodes -> 1 output.

    Trained by plain gradient descent with a sigmoid activation at every layer.
    """

    def __init__(self, input_array: numpy.ndarray, output_array: numpy.ndarray) -> None:
        """Store training data and initialise all three weight matrices randomly.

        :param input_array: training inputs, shape (n_samples, n_features)
        :param output_array: expected outputs, shape (n_samples, 1)
        """
        self.input_array = input_array
        # Random initial weights: (nodes in previous layer, nodes in next layer).
        # input layer -> first hidden layer (4 nodes)
        self.input_layer_and_first_hidden_layer_weights = numpy.random.rand(
            self.input_array.shape[1], 4
        )
        # first hidden layer (4 nodes) -> second hidden layer (3 nodes)
        self.first_hidden_layer_and_second_hidden_layer_weights = numpy.random.rand(4, 3)
        # second hidden layer (3 nodes) -> output layer (1 node)
        self.second_hidden_layer_and_output_layer_weights = numpy.random.rand(3, 1)
        # Real output values provided.
        self.output_array = output_array
        # Predicted output values, initially all zeros.
        self.predicted_output = numpy.zeros(output_array.shape)

    def feedforward(self) -> numpy.ndarray:
        """Propagate the training inputs forward and return the network output."""
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.input_array, self.input_layer_and_first_hidden_layer_weights)
        )
        # Layer connecting the first hidden set of nodes with the second.
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        # Layer connecting the second hidden layer with the output node.
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return self.layer_between_second_hidden_layer_and_output

    def back_propagation(self) -> None:
        """Update all weight matrices from the error of the last feedforward pass."""
        updated_second_hidden_layer_and_output_layer_weights = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T,
            2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output),
        )
        updated_first_hidden_layer_and_second_hidden_layer_weights = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T,
            numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output),
                self.second_hidden_layer_and_output_layer_weights.T,
            )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer
            ),
        )
        updated_input_layer_and_first_hidden_layer_weights = numpy.dot(
            self.input_array.T,
            numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output),
                    self.second_hidden_layer_and_output_layer_weights.T,
                )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer
                ),
                self.first_hidden_layer_and_second_hidden_layer_weights.T,
            )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer),
        )

        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def train(self, output: numpy.ndarray, iterations: int, give_loss: bool) -> None:
        """Run *iterations* rounds of feedforward + backprop.

        :param output: expected outputs, used only for the optional loss report
        :param give_loss: when True, print the mean squared error each iteration
        """
        for iteration in range(1, iterations + 1):
            self.predicted_output = self.feedforward()
            self.back_propagation()
            if give_loss:
                loss = numpy.mean(numpy.square(output - self.feedforward()))
                print(f"Iteration {iteration} Loss: {loss}")

    def predict(self, input_arr: numpy.ndarray) -> int:
        """Classify a single input vector; returns 1 when the output exceeds 0.6."""
        self.array = input_arr
        self.layer_between_input_and_first_hidden_layer = sigmoid(
            numpy.dot(self.array, self.input_layer_and_first_hidden_layer_weights)
        )
        self.layer_between_first_hidden_layer_and_second_hidden_layer = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer,
                self.first_hidden_layer_and_second_hidden_layer_weights,
            )
        )
        self.layer_between_second_hidden_layer_and_output = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer,
                self.second_hidden_layer_and_output_layer_weights,
            )
        )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6)


def sigmoid(value: numpy.ndarray) -> numpy.ndarray:
    """Logistic sigmoid activation."""
    return 1 / (1 + numpy.exp(-value))


def sigmoid_derivative(value: numpy.ndarray) -> numpy.ndarray:
    """Derivative of the sigmoid, expressed in terms of the sigmoid's *output*."""
    return (value) * (1 - (value))


def example() -> int:
    """Train the network on 3-bit parity (XOR of three inputs) and predict [1, 1, 1].

    NOTE: weights are randomly initialised without a seed, so the prediction is
    not deterministic; it is always 0 or 1.
    """
    # NOTE(review): the mangled source used the non-existent ``numpy.floataa``;
    # restored to ``numpy.float64``.
    test_input = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ),
        dtype=numpy.float64,
    )
    # True output values for the given input values.
    output = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]), dtype=numpy.float64)
    neural_network = TwoHiddenLayerNeuralNetwork(
        input_array=test_input, output_array=output
    )
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=output, iterations=10, give_loss=False)
    return neural_network.predict(numpy.array(([1, 1, 1]), dtype=numpy.float64))


if __name__ == "__main__":
    example()
76
import os
import unittest

from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for Funnel, covering both the slow and the fast tokenizer.

    NOTE(review): class/attribute names were obfuscated in the mangled source
    (``UpperCAmelCase_``, ``a__``); restored from the upstream transformers test
    module — confirm the two ``True`` flags map to ``test_rust_tokenizer`` and
    ``space_between_special_tokens`` as in upstream.
    """

    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        """Write a minimal WordPiece vocab file into the test's temp directory."""
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        """Slow tokenizer loaded from the temp vocab."""
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast tokenizer loaded from the temp vocab."""
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        """Raw input text and its expected detokenized form."""
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        """Tokenization and token->id conversion against the fixture vocab."""
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        """Funnel uses token type 2 for the leading <cls> token."""
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(
                inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len
            )
76
1
import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer __lowerCAmelCase : Any = logging.get_logger(__name__) __lowerCAmelCase : int = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} __lowerCAmelCase : Dict = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __lowerCAmelCase : Dict = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } __lowerCAmelCase : Tuple = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 
'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } __lowerCAmelCase : Any = { 'facebook/dpr-ctx_encoder-single-nq-base': 512, 'facebook/dpr-ctx_encoder-multiset-base': 512, } __lowerCAmelCase : Optional[Any] = { 'facebook/dpr-question_encoder-single-nq-base': 512, 'facebook/dpr-question_encoder-multiset-base': 512, } __lowerCAmelCase : Tuple = { 'facebook/dpr-reader-single-nq-base': 512, 'facebook/dpr-reader-multiset-base': 512, } __lowerCAmelCase : Any = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } __lowerCAmelCase : Tuple = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } __lowerCAmelCase : List[Any] = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP a__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP a__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION __lowerCAmelCase : Tuple = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text'] ) __lowerCAmelCase : Tuple = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) __lowerCAmelCase : Tuple = R'\n Return a dictionary with the token ids of the input strings 
and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. 
This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. 
If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(_A ) class UpperCAmelCase_ : '''simple docstring''' def __call__( self : List[Any] , UpperCamelCase__ : int , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Optional[str] = None , UpperCamelCase__ : Union[bool, str] = False , UpperCamelCase__ : Union[bool, str] = False , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[bool] = None , **UpperCamelCase__ : Optional[Any] , ) -> BatchEncoding: """simple docstring""" if titles is None and texts is None: return super().__call__( UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , ) elif titles is None or texts is None: __magic_name__ = titles if texts is None else texts return super().__call__( UpperCamelCase__ , UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors=UpperCamelCase__ , return_attention_mask=UpperCamelCase__ , **UpperCamelCase__ , ) __magic_name__ = titles if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) else [titles] __magic_name__ = texts if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) else [texts] __magic_name__ = len(UpperCamelCase__ ) __magic_name__ = questions if not isinstance(UpperCamelCase__ , UpperCamelCase__ ) else [questions] * n_passages if len(UpperCamelCase__ ) != len(UpperCamelCase__ ): raise ValueError( F'''There 
should be as many titles than texts but got {len(UpperCamelCase__ )} titles and {len(UpperCamelCase__ )} texts.''' ) __magic_name__ = super().__call__(UpperCamelCase__ , UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ )["""input_ids"""] __magic_name__ = super().__call__(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ , padding=UpperCamelCase__ , truncation=UpperCamelCase__ )["""input_ids"""] __magic_name__ = { """input_ids""": [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(UpperCamelCase__ , UpperCamelCase__ ) ] } if return_attention_mask is not False: __magic_name__ = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] ) __magic_name__ = attention_mask return self.pad(UpperCamelCase__ , padding=UpperCamelCase__ , max_length=UpperCamelCase__ , return_tensors=UpperCamelCase__ ) def _lowercase ( self : List[str] , UpperCamelCase__ : BatchEncoding , UpperCamelCase__ : DPRReaderOutput , UpperCamelCase__ : int = 16 , UpperCamelCase__ : int = 64 , UpperCamelCase__ : int = 4 , ) -> List[DPRSpanPrediction]: """simple docstring""" __magic_name__ = reader_input["""input_ids"""] __magic_name__ , __magic_name__ , __magic_name__ = reader_output[:3] __magic_name__ = len(UpperCamelCase__ ) __magic_name__ = sorted(range(UpperCamelCase__ ) , reverse=UpperCamelCase__ , key=relevance_logits.__getitem__ ) __magic_name__ = [] for doc_id in sorted_docs: __magic_name__ = list(input_ids[doc_id] ) # assuming question & title information is at the beginning of the sequence __magic_name__ = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: __magic_name__ = sequence_ids.index(self.pad_token_id ) else: __magic_name__ = len(UpperCamelCase__ ) __magic_name__ = 
self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=UpperCamelCase__ , top_spans=UpperCamelCase__ , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=UpperCamelCase__ , start_index=UpperCamelCase__ , end_index=UpperCamelCase__ , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) ) if len(UpperCamelCase__ ) >= num_spans: break return nbest_spans_predictions[:num_spans] def _lowercase ( self : Tuple , UpperCamelCase__ : List[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : int , UpperCamelCase__ : int , ) -> List[DPRSpanPrediction]: """simple docstring""" __magic_name__ = [] for start_index, start_score in enumerate(UpperCamelCase__ ): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ): scores.append(((start_index, start_index + answer_length), start_score + end_score) ) __magic_name__ = sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : x[1] , reverse=UpperCamelCase__ ) __magic_name__ = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(F'''Wrong span indices: [{start_index}:{end_index}]''' ) __magic_name__ = end_index - start_index + 1 if length > max_answer_length: raise ValueError(F'''Span is too long: {length} > {max_answer_length}''' ) if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals ): continue chosen_span_intervals.append((start_index, end_index) ) if len(UpperCamelCase__ ) == top_spans: break return chosen_span_intervals @add_end_docstrings(_A ) 
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    """BERT tokenizer configured for the DPR reader checkpoints.

    NOTE(review): the mangled source hid the base classes behind ``_A``;
    restored as the reader mixin plus `BertTokenizer`, matching the sibling
    encoder tokenizers above.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
76
from collections import deque

from .hash_table import HashTable


class HashTableWithLinkedList(HashTable):
    """Hash table whose slots hold deques, so colliding keys chain their values."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        """Prepend *data* to the deque stored at *key*, creating it on first use."""
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        # NOTE(review): the mangled source hid the assignment target; restored
        # as the bookkeeping mirror of the slot, per the upstream implementation.
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        """Average remaining capacity per slot, scaled by the charge factor."""
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        """Keep chaining into *key* until its deque is full and no slot is empty."""
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key

        return super()._collision_resolution(key, data)
76
1
from manim import *


class Stage2(Scene):
    """Animate checkpoint weights being written to disk memmaps, then freed.

    NOTE(review): reconstructed from a whitespace-mangled source in which the
    manim direction (UP/RIGHT/DOWN/LEFT) and color constants were obfuscated;
    they are restored here following the upstream `accelerate` big-model
    inference animation — confirm exact layout against that script.
    """

    def construct(self):
        # Building blocks: a memory cell, a small meta cell, and a fill overlay.
        mem = Rectangle(height=0.5, width=0.5)
        meta_mem = Rectangle(height=0.25, width=0.25)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        # CPU: two six-cell columns with a label.
        cpu_left_col_base = [mem.copy() for _ in range(6)]
        cpu_right_col_base = [mem.copy() for _ in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        # GPU: four cells.
        gpu_base = [mem.copy() for _ in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        # Model: six cells.
        model_base = [mem.copy() for _ in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        # Outline the (empty) model cells and mirror tiny placeholders on the CPU.
        model_arr = []
        model_cpu_arr = []
        model_meta_arr = []
        for i, rect in enumerate(model_base):
            rect.set_stroke(YELLOW)
            cpu_target = (
                Rectangle(height=0.46 / 4, width=0.46 / 3)
                .set_stroke(width=0.0)
                .set_fill(YELLOW, opacity=0.7)
            )
            if i == 0:
                cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.set_x(cpu_target.get_x() + 0.1)
            elif i == 3:
                cpu_target.next_to(model_cpu_arr[0], direction=UP, buff=0.0)
            else:
                cpu_target.next_to(model_cpu_arr[i - 1], direction=RIGHT, buff=0.0)
            self.add(cpu_target)
            model_cpu_arr.append(cpu_target)
        self.add(*model_arr, *model_cpu_arr, *model_meta_arr)

        # Checkpoint: six cells above the model.
        checkpoint_base = [mem.copy() for _ in range(6)]
        checkpoint_rect = VGroup(*checkpoint_base).arrange(RIGHT, buff=0)
        checkpoint_text = Text("Loaded Checkpoint", font_size=24)
        checkpoint = Group(checkpoint_rect, checkpoint_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        checkpoint.move_to([3, 0.5, 0])
        self.add(checkpoint)

        # Blue fills for the checkpoint cells, copied into free CPU slots.
        ckpt_arr = []
        ckpt_cpu_arr = []
        for i, rect in enumerate(checkpoint_base):
            target = fill.copy().set_fill(BLUE, opacity=0.7)
            target.move_to(rect)
            ckpt_arr.append(target)

            cpu_target = target.copy()
            if i < 5:
                cpu_target.move_to(cpu_left_col_base[i + 1])
            else:
                cpu_target.move_to(cpu_right_col_base[i - 5])
            ckpt_cpu_arr.append(cpu_target)
        self.add(*ckpt_arr, *ckpt_cpu_arr)

        # Legend.
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint",
            font_size=18,
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_2 = MarkupText(
            f"Based on the passed in configuration, weights are stored in\na variety of np.memmaps on disk or to a particular device.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])

        # Disk: two columns of six small meta cells.
        disk_left_col_base = [meta_mem.copy() for _ in range(6)]
        disk_right_col_base = [meta_mem.copy() for _ in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4.0, -1.25, 0])
        self.play(Write(step_2, run_time=3), Write(disk_text, run_time=1), Create(disk_rects, run_time=1))

        # Shrink each CPU-resident checkpoint placeholder onto the disk.
        animations = []
        for i, rect in enumerate(ckpt_cpu_arr):
            target = rect.copy()
            target.generate_target()
            target.target.move_to(disk_left_col_base[i]).scale(0.5)
            animations.append(MoveToTarget(target, run_time=1.5))
        self.play(*animations)

        self.play(FadeOut(step_2))

        step_3 = MarkupText(f"Then, the checkpoint is removed from memory\nthrough garbage collection.", font_size=24)
        step_3.move_to([2, 2, 0])
        self.play(Write(step_3, run_time=3))

        # NOTE(review): the two positional FadeOut targets were obfuscated in
        # the source; restored as the checkpoint group per the upstream script.
        self.play(
            FadeOut(checkpoint_rect, checkpoint_text, *ckpt_arr, *ckpt_cpu_arr),
        )
        self.wait()
76
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os

from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
    DYNAMO_BACKENDS,
    _ask_field,
    _ask_options,
    _convert_dynamo_backend,
    _convert_mixed_precision,
    _convert_sagemaker_distributed_mode,
    _convert_yes_no_to_bool,
)


if is_boto3_available():
    import boto3  # noqa: F401


def _create_iam_role_for_sagemaker(role_name):
    """Create an IAM role SageMaker can assume, with the training permissions it needs.

    If the role already exists, it is reused and a notice is printed.
    """
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2))
        policy_document = {
            "Version": "2012-10-17",
            "Statement": [
                {
                    "Effect": "Allow",
                    "Action": [
                        "sagemaker:*",
                        "ecr:GetDownloadUrlForLayer",
                        "ecr:BatchGetImage",
                        "ecr:BatchCheckLayerAvailability",
                        "ecr:GetAuthorizationToken",
                        "cloudwatch:PutMetricData",
                        "cloudwatch:GetMetricData",
                        "cloudwatch:GetMetricStatistics",
                        "cloudwatch:ListMetrics",
                        "logs:CreateLogGroup",
                        "logs:CreateLogStream",
                        "logs:DescribeLogStreams",
                        "logs:PutLogEvents",
                        "logs:GetLogEvents",
                        "s3:CreateBucket",
                        "s3:ListBucket",
                        "s3:GetBucketLocation",
                        "s3:GetObject",
                        "s3:PutObject",
                    ],
                    "Resource": "*",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name,
            PolicyName=f"{role_name}_policy_permission",
            PolicyDocument=json.dumps(policy_document, indent=2),
        )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f"role {role_name} already exists. Using existing one")


def _get_iam_role_arn(role_name):
    """Return the ARN of an existing IAM role."""
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]


def get_sagemaker_input():
    """Interactively collect a SageMaker launch configuration.

    Walks the user through credentials, IAM role, Docker image, input/metric
    files, distributed mode, torch dynamo options, instance type, machine
    count and mixed precision, and returns the assembled `SageMakerConfig`.

    NOTE(review): the obfuscated source erased several assignment targets;
    the `os.environ[...]` targets below are restored from the upstream
    accelerate implementation — confirm before relying on them.
    """
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`"
        )
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )

    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)]
        )
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts."
        )

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
76
1
from collections import deque def a__ ( A_ ): '''simple docstring''' __magic_name__ = len(A_ ) __magic_name__ = deque() __magic_name__ = [False for _ in range(A_ )] __magic_name__ = [-1 for _ in range(A_ )] __magic_name__ = index_of[:] def strong_connect(A_, A_, A_ ): __magic_name__ = index # the number when this node is seen __magic_name__ = index # lowest rank node reachable from here index += 1 stack.append(A_ ) __magic_name__ = True for w in g[v]: if index_of[w] == -1: __magic_name__ = strong_connect(A_, A_, A_ ) __magic_name__ = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) elif on_stack[w]: __magic_name__ = ( lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v] ) if lowlink_of[v] == index_of[v]: __magic_name__ = [] __magic_name__ = stack.pop() __magic_name__ = False component.append(A_ ) while w != v: __magic_name__ = stack.pop() __magic_name__ = False component.append(A_ ) components.append(A_ ) return index __magic_name__ = [] for v in range(A_ ): if index_of[v] == -1: strong_connect(A_, 0, A_ ) return components def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = [[] for _ in range(A_ )] for u, v in edges: g[u].append(A_ ) return g if __name__ == "__main__": # Test __lowerCAmelCase : Tuple = 7 __lowerCAmelCase : Union[str, Any] = [0, 0, 1, 2, 3, 3, 4, 4, 6] __lowerCAmelCase : Dict = [1, 3, 2, 0, 1, 4, 5, 6, 5] __lowerCAmelCase : Optional[int] = [(u, v) for u, v in zip(source, target)] __lowerCAmelCase : str = create_graph(n_vertices, edges) assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g)
76
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


# Module logger (mangled name; conventionally ``logger``).
__lowerCAmelCase : Dict = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class UpperCAmelCase_ ( _A ):
    '''CLIP-style image processor: resize -> center-crop -> rescale -> normalize.

    NOTE(review): this class is machine-mangled. The base ``_A`` is undefined
    (``BaseImageProcessor`` is imported above and otherwise unused); all five
    methods share the name ``_lowercase`` while ``preprocess`` calls
    ``self.resize``/``self.center_crop``/``self.rescale``/``self.normalize``;
    every parameter is named ``UpperCamelCase__`` (duplicate parameter names are
    a SyntaxError) while bodies read the real names; and every ``self.x = x``
    in ``__init__`` was collapsed to ``__magic_name__ = x``. Restore the names
    from the read sites before running.
    '''

    # Output keys produced by preprocess (mangled; conventionally model_input_names).
    a__ = ["""pixel_values"""]

    def __init__( self : Optional[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = True , **UpperCamelCase__ : int , ) -> None:
        """Store the default preprocessing configuration.

        Defaults: shortest-edge-224 resize, 224x224 center crop, 1/255 rescale,
        CLIP mean/std normalization, RGB conversion enabled.
        """
        super().__init__(**UpperCamelCase__ )
        __magic_name__ = size if size is not None else {"""shortest_edge""": 224}
        __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
        __magic_name__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ , param_name="""crop_size""" )
        # NOTE(review): each of the following should assign to the self.*
        # attribute of the same name read by preprocess() below.
        __magic_name__ = do_resize
        __magic_name__ = size
        __magic_name__ = resample
        __magic_name__ = do_center_crop
        __magic_name__ = crop_size
        __magic_name__ = do_rescale
        __magic_name__ = rescale_factor
        __magic_name__ = do_normalize
        __magic_name__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        __magic_name__ = image_std if image_std is not None else OPENAI_CLIP_STD
        __magic_name__ = do_convert_rgb

    def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray:
        """Resize so the shortest edge matches ``size["shortest_edge"]``."""
        __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
        if "shortest_edge" not in size:
            raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        __magic_name__ = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__ )
        return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )

    def _lowercase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        __magic_name__ = get_size_dict(UpperCamelCase__ )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The `size` parameter must contain the keys (height, width). 
 Got {size.keys()}''' )
        return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ )

    def _lowercase ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] , ) -> Optional[int]:
        """Scale pixel values by ``scale`` (typically 1/255)."""
        return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )

    def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict , ) -> np.ndarray:
        """Normalize with per-channel ``mean`` and ``std``."""
        return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )

    def _lowercase ( self : List[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ) -> PIL.Image.Image:
        """Run the configured pipeline over one image or a list of images.

        Per-call arguments override the instance defaults; returns a
        ``BatchFeature`` with key ``pixel_values``.
        """
        # Fall back to the instance defaults for any argument not supplied.
        __magic_name__ = do_resize if do_resize is not None else self.do_resize
        __magic_name__ = size if size is not None else self.size
        __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""size""" , default_to_square=UpperCamelCase__ )
        __magic_name__ = resample if resample is not None else self.resample
        __magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop
        __magic_name__ = crop_size if crop_size is not None else self.crop_size
        __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" , default_to_square=UpperCamelCase__ )
        __magic_name__ = do_rescale if do_rescale is not None else self.do_rescale
        __magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor
        __magic_name__ = do_normalize if do_normalize is not None else self.do_normalize
        __magic_name__ = image_mean if image_mean is not None else self.image_mean
        __magic_name__ = image_std if image_std is not None else self.image_std
        __magic_name__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
        __magic_name__ = make_list_of_images(UpperCamelCase__ )

        if not valid_images(UpperCamelCase__ ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray."""
            )

        # Each enabled step requires its corresponding parameters.
        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )

        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )

        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            __magic_name__ = [convert_to_rgb(UpperCamelCase__ ) for image in images]

        # All transformations expect numpy arrays.
        __magic_name__ = [to_numpy_array(UpperCamelCase__ ) for image in images]

        if do_resize:
            __magic_name__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]

        if do_center_crop:
            __magic_name__ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]

        if do_rescale:
            __magic_name__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]

        if do_normalize:
            __magic_name__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]

        # Convert to the requested channel layout (channels-first by default).
        __magic_name__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]

        __magic_name__ = {"""pixel_values""": images}
        return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
76
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_torch_available, ) __lowerCAmelCase : List[str] = { 'configuration_encodec': [ 'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP', 'EncodecConfig', ], 'feature_extraction_encodec': ['EncodecFeatureExtractor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : List[Any] = [ 'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST', 'EncodecModel', 'EncodecPreTrainedModel', ] if TYPE_CHECKING: from .configuration_encodec import ( ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP, EncodecConfig, ) from .feature_extraction_encodec import EncodecFeatureExtractor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encodec import ( ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST, EncodecModel, EncodecPreTrainedModel, ) else: import sys __lowerCAmelCase : Tuple = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
import unittest

from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        NystromformerForMaskedLM,
        NystromformerForMultipleChoice,
        NystromformerForQuestionAnswering,
        NystromformerForSequenceClassification,
        NystromformerForTokenClassification,
        NystromformerModel,
    )
    from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


class UpperCAmelCase_ :
    '''Helper that builds tiny Nystromformer configs/inputs and shape-checks each head.

    NOTE(review): this file is machine-mangled. All three classes share the
    name ``UpperCAmelCase_`` (the test class below instantiates the undefined
    ``NystromformerModelTester``); every method is named ``_lowercase`` while
    the mixin framework expects ``prepare_config_and_inputs``/``get_config``/
    ``test_*`` names; parameters are all ``UpperCamelCase__`` (duplicate
    parameter names are a SyntaxError) while bodies read the real names; and
    each ``__magic_name__ = x`` binding should target the name its subsequent
    reads use (e.g. ``self.batch_size``, ``model``, ``result``).
    '''

    def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Dict=7 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[int]=99 , UpperCamelCase__ : List[Any]=32 , UpperCamelCase__ : Any=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : str=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Dict=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : List[Any]=None , ) -> Union[str, Any]:
        """Store the miniature model hyper-parameters used by every check."""
        __magic_name__ = parent
        __magic_name__ = batch_size
        __magic_name__ = seq_length
        __magic_name__ = is_training
        __magic_name__ = use_input_mask
        __magic_name__ = use_token_type_ids
        __magic_name__ = use_labels
        __magic_name__ = vocab_size
        __magic_name__ = hidden_size
        __magic_name__ = num_hidden_layers
        __magic_name__ = num_attention_heads
        __magic_name__ = intermediate_size
        __magic_name__ = hidden_act
        __magic_name__ = hidden_dropout_prob
        __magic_name__ = attention_probs_dropout_prob
        __magic_name__ = max_position_embeddings
        __magic_name__ = type_vocab_size
        __magic_name__ = type_sequence_label_size
        __magic_name__ = initializer_range
        __magic_name__ = num_labels
        __magic_name__ = num_choices
        __magic_name__ = scope

    def _lowercase ( self : Any ) -> Any:
        """Create random input ids, masks and labels plus a config."""
        __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        __magic_name__ = None
        if self.use_input_mask:
            __magic_name__ = random_attention_mask([self.batch_size, self.seq_length] )

        __magic_name__ = None
        if self.use_token_type_ids:
            __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        __magic_name__ = None
        __magic_name__ = None
        __magic_name__ = None
        if self.use_labels:
            __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __magic_name__ = ids_tensor([self.batch_size] , self.num_choices )

        __magic_name__ = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _lowercase ( self : Tuple ) -> Any:
        """Build a NystromformerConfig from the stored hyper-parameters."""
        return NystromformerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )

    def _lowercase ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : str ) -> Tuple:
        """Run the base model with/without masks and check hidden-state shape."""
        __magic_name__ = NystromformerModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
        __magic_name__ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
        __magic_name__ = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _lowercase ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ) -> str:
        """Check the masked-LM head emits per-token vocab logits."""
        __magic_name__ = NystromformerForMaskedLM(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _lowercase ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Any ) -> Optional[Any]:
        """Check the QA head emits per-token start/end logits."""
        __magic_name__ = NystromformerForQuestionAnswering(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Any ) -> Optional[int]:
        """Check the sequence-classification head emits one logit row per example."""
        __magic_name__ = self.num_labels
        __magic_name__ = NystromformerForSequenceClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _lowercase ( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Any ) -> Dict:
        """Check the token-classification head emits per-token label logits."""
        __magic_name__ = self.num_labels
        __magic_name__ = NystromformerForTokenClassification(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> Optional[Any]:
        """Check the multiple-choice head; inputs are tiled along a choices axis."""
        __magic_name__ = self.num_choices
        __magic_name__ = NystromformerForMultipleChoice(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        # Expand (batch, seq) inputs to (batch, num_choices, seq).
        __magic_name__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __magic_name__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __magic_name__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __magic_name__ = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def _lowercase ( self : int ) -> List[Any]:
        """Repackage prepare_config_and_inputs() output for the common mixin tests."""
        __magic_name__ = self.prepare_config_and_inputs()
        (
            (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) ,
        ) = config_and_inputs
        __magic_name__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict


@require_torch
class UpperCAmelCase_ ( _A , _A , unittest.TestCase ):
    '''Common-mixin test suite for Nystromformer.

    NOTE(review): the bases ``_A , _A`` correspond to the imported (and
    otherwise unused) ``ModelTesterMixin`` and ``PipelineTesterMixin``.
    '''

    # all_model_classes / pipeline mapping consumed by the mixins (names mangled to a__).
    a__ = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    a__ = (
        {
            """feature-extraction""": NystromformerModel,
            """fill-mask""": NystromformerForMaskedLM,
            """question-answering""": NystromformerForQuestionAnswering,
            """text-classification""": NystromformerForSequenceClassification,
            """token-classification""": NystromformerForTokenClassification,
            """zero-shot""": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    a__ = False
    a__ = False

    def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
        """setUp: build the model tester and a ConfigTester."""
        __magic_name__ = NystromformerModelTester(self )
        __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )

    def _lowercase ( self : Tuple ) -> Any:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def _lowercase ( self : Optional[Any] ) -> Any:
        """Shape-check the base model."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def _lowercase ( self : Optional[Any] ) -> int:
        """Exercise the base model with every position-embedding variant."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __magic_name__ = type
            self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def _lowercase ( self : List[Any] ) -> List[Any]:
        """Shape-check the masked-LM head."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )

    def _lowercase ( self : Union[str, Any] ) -> str:
        """Shape-check the multiple-choice head."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )

    def _lowercase ( self : Dict ) -> List[Any]:
        """Shape-check the question-answering head."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )

    def _lowercase ( self : str ) -> int:
        """Shape-check the sequence-classification head."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )

    def _lowercase ( self : List[Any] ) -> List[str]:
        """Shape-check the token-classification head."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )

    @slow
    def _lowercase ( self : str ) -> Tuple:
        """Smoke-test loading the first published checkpoint."""
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __magic_name__ = NystromformerModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )


@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
    '''Slow integration tests against the uw-madison/nystromformer-512 checkpoint.'''

    @slow
    def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
        """Compare base-model hidden states against recorded reference values."""
        __magic_name__ = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" )
        __magic_name__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )

        with torch.no_grad():
            __magic_name__ = model(UpperCamelCase__ )[0]

        __magic_name__ = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , UpperCamelCase__ )

        __magic_name__ = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )

        self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )

    @slow
    def _lowercase ( self : int ) -> str:
        """End-to-end masked-LM check: the [MASK] should decode to 'capital'."""
        __magic_name__ = """the [MASK] of Belgium is Brussels"""

        __magic_name__ = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" )
        __magic_name__ = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" )

        __magic_name__ = tokenizer(UpperCamelCase__ , return_tensors="""pt""" )

        with torch.no_grad():
            __magic_name__ = model(encoding.input_ids ).logits

        __magic_name__ = token_logits[:, 2, :].argmax(-1 )[0]
        self.assertEqual(tokenizer.decode(UpperCamelCase__ ) , """capital""" )
76
1
from __future__ import annotations from collections import Counter from random import random class UpperCAmelCase_ : '''simple docstring''' def __init__( self : Optional[int] ) -> Dict: """simple docstring""" __magic_name__ = {} def _lowercase ( self : Dict , UpperCamelCase__ : str ) -> None: """simple docstring""" __magic_name__ = {} def _lowercase ( self : Any , UpperCamelCase__ : str , UpperCamelCase__ : str , UpperCamelCase__ : float ) -> None: """simple docstring""" if nodea not in self.connections: self.add_node(UpperCamelCase__ ) if nodea not in self.connections: self.add_node(UpperCamelCase__ ) __magic_name__ = probability def _lowercase ( self : Optional[Any] ) -> list[str]: """simple docstring""" return list(self.connections ) def _lowercase ( self : Any , UpperCamelCase__ : str ) -> str: """simple docstring""" __magic_name__ = 0 __magic_name__ = random() for dest in self.connections[node]: current_probability += self.connections[node][dest] if current_probability > random_value: return dest return "" def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = MarkovChainGraphUndirectedUnweighted() for nodea, nodea, probability in transitions: graph.add_transition_probability(A_, A_, A_ ) __magic_name__ = Counter(graph.get_nodes() ) __magic_name__ = start for _ in range(A_ ): __magic_name__ = graph.transition(A_ ) visited[node] += 1 return visited if __name__ == "__main__": import doctest doctest.testmod()
76
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Checkpoint name -> hosted config URL.
CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class UpperCAmelCase_ ( PretrainedConfig ):
    """Configuration for the CvT (Convolutional vision Transformer) model.

    Most hyper-parameters are per-stage lists of length 3 (patch embedding,
    attention, and regularization settings for each of the three stages).

    NOTE(review): the mangled original subclassed the undefined ``_A``
    (``PretrainedConfig`` was imported but unused), named every parameter
    ``UpperCamelCase__`` (duplicate parameter names are a SyntaxError), and
    bound every attribute to ``__magic_name__`` instead of ``self`` — the
    names below are restored from the assignment read sites.
    """

    a__ = """cvt"""

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        """Store the per-stage hyper-parameters; extra kwargs go to PretrainedConfig."""
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
76
1
import math def a__ ( A_, A_ = 0, A_ = 0 ): '''simple docstring''' __magic_name__ = end or len(A_ ) for i in range(A_, A_ ): __magic_name__ = i __magic_name__ = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: __magic_name__ = array[temp_index - 1] temp_index -= 1 __magic_name__ = temp_index_value return array def a__ ( A_, A_, A_ ): # Max Heap '''simple docstring''' __magic_name__ = index __magic_name__ = 2 * index + 1 # Left Node __magic_name__ = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: __magic_name__ = left_index if right_index < heap_size and array[largest] < array[right_index]: __magic_name__ = right_index if largest != index: __magic_name__ , __magic_name__ = array[largest], array[index] heapify(A_, A_, A_ ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = len(A_ ) for i in range(n // 2, -1, -1 ): heapify(A_, A_, A_ ) for i in range(n - 1, 0, -1 ): __magic_name__ , __magic_name__ = array[0], array[i] heapify(A_, 0, A_ ) return array def a__ ( A_, A_, A_, A_ ): '''simple docstring''' if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def a__ ( A_, A_, A_, A_ ): '''simple docstring''' __magic_name__ = low __magic_name__ = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i __magic_name__ , __magic_name__ = array[j], array[i] i += 1 def a__ ( A_ ): '''simple docstring''' if len(A_ ) == 0: return array __magic_name__ = 2 * math.ceil(math.loga(len(A_ ) ) ) __magic_name__ = 16 return intro_sort(A_, 0, len(A_ ), A_, A_ ) def a__ ( A_, A_, A_, A_, A_ ): '''simple docstring''' while end - start > size_threshold: if max_depth == 0: return heap_sort(A_ ) max_depth -= 1 __magic_name__ = median_of_a(A_, A_, 
start + ((end - start) // 2) + 1, end - 1 ) __magic_name__ = partition(A_, A_, A_, A_ ) intro_sort(A_, A_, A_, A_, A_ ) __magic_name__ = p return insertion_sort(A_, A_, A_ ) if __name__ == "__main__": import doctest doctest.testmod() __lowerCAmelCase : str = input('Enter numbers separated by a comma : ').strip() __lowerCAmelCase : List[Any] = [float(item) for item in user_input.split(',')] print(sort(unsorted))
76
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowerCAmelCase : List[str] = { 'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'], 'tokenization_canine': ['CanineTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[Any] = [ 'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST', 'CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineLayer', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys __lowerCAmelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
1
import numpy as np import pandas as pd from sklearn.preprocessing import MinMaxScaler from tensorflow.keras.layers import LSTM, Dense from tensorflow.keras.models import Sequential if __name__ == "__main__": __lowerCAmelCase : int = pd.read_csv('sample_data.csv', header=None) __lowerCAmelCase : str = df.shape[:1][0] # If you're using some other dataset input the target column __lowerCAmelCase : Tuple = df.iloc[:, 1:2] __lowerCAmelCase : List[Any] = actual_data.values.reshape(len_data, 1) __lowerCAmelCase : Union[str, Any] = MinMaxScaler().fit_transform(actual_data) __lowerCAmelCase : int = 10 __lowerCAmelCase : Dict = 5 __lowerCAmelCase : Tuple = 20 __lowerCAmelCase : int = len_data - periods * look_back __lowerCAmelCase : str = actual_data[:division] __lowerCAmelCase : int = actual_data[division - look_back :] __lowerCAmelCase , __lowerCAmelCase : Optional[int] = [], [] __lowerCAmelCase , __lowerCAmelCase : Dict = [], [] for i in range(0, len(train_data) - forward_days - look_back + 1): train_x.append(train_data[i : i + look_back]) train_y.append(train_data[i + look_back : i + look_back + forward_days]) for i in range(0, len(test_data) - forward_days - look_back + 1): test_x.append(test_data[i : i + look_back]) test_y.append(test_data[i + look_back : i + look_back + forward_days]) __lowerCAmelCase : Any = np.array(train_x) __lowerCAmelCase : int = np.array(test_x) __lowerCAmelCase : Optional[int] = np.array([list(i.ravel()) for i in train_y]) __lowerCAmelCase : Dict = np.array([list(i.ravel()) for i in test_y]) __lowerCAmelCase : Optional[int] = Sequential() model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True)) model.add(LSTM(64, input_shape=(128, 1))) model.add(Dense(forward_days)) model.compile(loss='mean_squared_error', optimizer='adam') __lowerCAmelCase : str = model.fit( x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4 ) __lowerCAmelCase : Optional[int] = model.predict(x_test)
76
import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() __lowerCAmelCase : str = logging.get_logger(__name__) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForSequenceClassification.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""projector.weight"""] __magic_name__ = downstream_dict["""projector.bias"""] __magic_name__ = downstream_dict["""model.post_net.linear.weight"""] __magic_name__ = downstream_dict["""model.post_net.linear.bias"""] return model def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForAudioFrameClassification.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""model.linear.weight"""] __magic_name__ = downstream_dict["""model.linear.bias"""] return model def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForXVector.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""connector.weight"""] __magic_name__ = downstream_dict["""connector.bias"""] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): __magic_name__ = downstream_dict[ f'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] __magic_name__ = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""] __magic_name__ = downstream_dict["""objective.W"""] return model @torch.no_grad() def a__ ( A_, A_, A_, A_ ): '''simple docstring''' __magic_name__ = torch.load(A_, map_location="""cpu""" ) 
__magic_name__ = checkpoint["""Downstream"""] __magic_name__ = WavaVecaConfig.from_pretrained(A_ ) __magic_name__ = WavaVecaFeatureExtractor.from_pretrained( A_, return_attention_mask=A_, do_normalize=A_ ) __magic_name__ = hf_config.architectures[0] if arch.endswith("""ForSequenceClassification""" ): __magic_name__ = convert_classification(A_, A_, A_ ) elif arch.endswith("""ForAudioFrameClassification""" ): __magic_name__ = convert_diarization(A_, A_, A_ ) elif arch.endswith("""ForXVector""" ): __magic_name__ = convert_xvector(A_, A_, A_ ) else: raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' ) if hf_config.use_weighted_layer_sum: __magic_name__ = checkpoint["""Featurizer"""]["""weights"""] hf_feature_extractor.save_pretrained(A_ ) hf_model.save_pretrained(A_ ) if __name__ == "__main__": __lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.' ) parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') __lowerCAmelCase : str = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
76
1
import argparse import os import torch from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) __lowerCAmelCase : Optional[Any] = { 'sample_size': 32, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 2, 'num_class_embeds': 1000, 'block_out_channels': [32, 64], 'attention_head_dim': 8, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'scale_shift', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } __lowerCAmelCase : Optional[int] = { 'sample_size': 64, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 3, 'num_class_embeds': 1000, 'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4], 'attention_head_dim': 64, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'AttnUpBlock2D', 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'scale_shift', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } __lowerCAmelCase : List[str] = { 'sample_size': 256, 'in_channels': 3, 'out_channels': 3, 'layers_per_block': 2, 'num_class_embeds': None, 'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4], 'attention_head_dim': 64, 'down_block_types': [ 'ResnetDownsampleBlock2D', 'ResnetDownsampleBlock2D', 'ResnetDownsampleBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', 'AttnDownBlock2D', ], 'up_block_types': [ 'AttnUpBlock2D', 'AttnUpBlock2D', 'AttnUpBlock2D', 'ResnetUpsampleBlock2D', 'ResnetUpsampleBlock2D', 'ResnetUpsampleBlock2D', ], 'resnet_time_scale_shift': 'default', 'upsample_type': 'resnet', 'downsample_type': 'resnet', } __lowerCAmelCase : Union[str, Any] = { 'num_train_timesteps': 40, 'sigma_min': 0.002, 'sigma_max': 80.0, } __lowerCAmelCase : Optional[int] = { 'num_train_timesteps': 201, 'sigma_min': 0.002, 'sigma_max': 80.0, } __lowerCAmelCase : Any = { 
'num_train_timesteps': 151, 'sigma_min': 0.002, 'sigma_max': 80.0, } def a__ ( A_ ): '''simple docstring''' if isinstance(A_, A_ ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError("""boolean value expected""" ) def a__ ( A_, A_, A_, A_, A_=False ): '''simple docstring''' __magic_name__ = checkpoint[f'''{old_prefix}.in_layers.0.weight'''] __magic_name__ = checkpoint[f'''{old_prefix}.in_layers.0.bias'''] __magic_name__ = checkpoint[f'''{old_prefix}.in_layers.2.weight'''] __magic_name__ = checkpoint[f'''{old_prefix}.in_layers.2.bias'''] __magic_name__ = checkpoint[f'''{old_prefix}.emb_layers.1.weight'''] __magic_name__ = checkpoint[f'''{old_prefix}.emb_layers.1.bias'''] __magic_name__ = checkpoint[f'''{old_prefix}.out_layers.0.weight'''] __magic_name__ = checkpoint[f'''{old_prefix}.out_layers.0.bias'''] __magic_name__ = checkpoint[f'''{old_prefix}.out_layers.3.weight'''] __magic_name__ = checkpoint[f'''{old_prefix}.out_layers.3.bias'''] if has_skip: __magic_name__ = checkpoint[f'''{old_prefix}.skip_connection.weight'''] __magic_name__ = checkpoint[f'''{old_prefix}.skip_connection.bias'''] return new_checkpoint def a__ ( A_, A_, A_, A_, A_=None ): '''simple docstring''' __magic_name__ , __magic_name__ , __magic_name__ = checkpoint[f'''{old_prefix}.qkv.weight'''].chunk(3, dim=0 ) __magic_name__ , __magic_name__ , __magic_name__ = checkpoint[f'''{old_prefix}.qkv.bias'''].chunk(3, dim=0 ) __magic_name__ = checkpoint[f'''{old_prefix}.norm.weight'''] __magic_name__ = checkpoint[f'''{old_prefix}.norm.bias'''] __magic_name__ = weight_q.squeeze(-1 ).squeeze(-1 ) __magic_name__ = bias_q.squeeze(-1 ).squeeze(-1 ) __magic_name__ = weight_k.squeeze(-1 ).squeeze(-1 ) __magic_name__ = bias_k.squeeze(-1 ).squeeze(-1 ) __magic_name__ = weight_v.squeeze(-1 ).squeeze(-1 ) __magic_name__ = bias_v.squeeze(-1 ).squeeze(-1 ) __magic_name__ = ( 
checkpoint[f'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 ) ) __magic_name__ = checkpoint[f'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 ) return new_checkpoint def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = torch.load(A_, map_location="""cpu""" ) __magic_name__ = {} __magic_name__ = checkpoint["""time_embed.0.weight"""] __magic_name__ = checkpoint["""time_embed.0.bias"""] __magic_name__ = checkpoint["""time_embed.2.weight"""] __magic_name__ = checkpoint["""time_embed.2.bias"""] if unet_config["num_class_embeds"] is not None: __magic_name__ = checkpoint["""label_emb.weight"""] __magic_name__ = checkpoint["""input_blocks.0.0.weight"""] __magic_name__ = checkpoint["""input_blocks.0.0.bias"""] __magic_name__ = unet_config["""down_block_types"""] __magic_name__ = unet_config["""layers_per_block"""] __magic_name__ = unet_config["""attention_head_dim"""] __magic_name__ = unet_config["""block_out_channels"""] __magic_name__ = 1 __magic_name__ = channels_list[0] for i, layer_type in enumerate(A_ ): __magic_name__ = channels_list[i] __magic_name__ = current_channels != prev_channels if layer_type == "ResnetDownsampleBlock2D": for j in range(A_ ): __magic_name__ = f'''down_blocks.{i}.resnets.{j}''' __magic_name__ = f'''input_blocks.{current_layer}.0''' __magic_name__ = True if j == 0 and downsample_block_has_skip else False __magic_name__ = convert_resnet(A_, A_, A_, A_, has_skip=A_ ) current_layer += 1 elif layer_type == "AttnDownBlock2D": for j in range(A_ ): __magic_name__ = f'''down_blocks.{i}.resnets.{j}''' __magic_name__ = f'''input_blocks.{current_layer}.0''' __magic_name__ = True if j == 0 and downsample_block_has_skip else False __magic_name__ = convert_resnet(A_, A_, A_, A_, has_skip=A_ ) __magic_name__ = f'''down_blocks.{i}.attentions.{j}''' __magic_name__ = f'''input_blocks.{current_layer}.1''' __magic_name__ = convert_attention( A_, A_, A_, A_, A_ ) current_layer += 1 if i != len(A_ ) - 1: __magic_name__ = 
f'''down_blocks.{i}.downsamplers.0''' __magic_name__ = f'''input_blocks.{current_layer}.0''' __magic_name__ = convert_resnet(A_, A_, A_, A_ ) current_layer += 1 __magic_name__ = current_channels # hardcoded the mid-block for now __magic_name__ = """mid_block.resnets.0""" __magic_name__ = """middle_block.0""" __magic_name__ = convert_resnet(A_, A_, A_, A_ ) __magic_name__ = """mid_block.attentions.0""" __magic_name__ = """middle_block.1""" __magic_name__ = convert_attention(A_, A_, A_, A_, A_ ) __magic_name__ = """mid_block.resnets.1""" __magic_name__ = """middle_block.2""" __magic_name__ = convert_resnet(A_, A_, A_, A_ ) __magic_name__ = 0 __magic_name__ = unet_config["""up_block_types"""] for i, layer_type in enumerate(A_ ): if layer_type == "ResnetUpsampleBlock2D": for j in range(layers_per_block + 1 ): __magic_name__ = f'''up_blocks.{i}.resnets.{j}''' __magic_name__ = f'''output_blocks.{current_layer}.0''' __magic_name__ = convert_resnet(A_, A_, A_, A_, has_skip=A_ ) current_layer += 1 if i != len(A_ ) - 1: __magic_name__ = f'''up_blocks.{i}.upsamplers.0''' __magic_name__ = f'''output_blocks.{current_layer-1}.1''' __magic_name__ = convert_resnet(A_, A_, A_, A_ ) elif layer_type == "AttnUpBlock2D": for j in range(layers_per_block + 1 ): __magic_name__ = f'''up_blocks.{i}.resnets.{j}''' __magic_name__ = f'''output_blocks.{current_layer}.0''' __magic_name__ = convert_resnet(A_, A_, A_, A_, has_skip=A_ ) __magic_name__ = f'''up_blocks.{i}.attentions.{j}''' __magic_name__ = f'''output_blocks.{current_layer}.1''' __magic_name__ = convert_attention( A_, A_, A_, A_, A_ ) current_layer += 1 if i != len(A_ ) - 1: __magic_name__ = f'''up_blocks.{i}.upsamplers.0''' __magic_name__ = f'''output_blocks.{current_layer-1}.2''' __magic_name__ = convert_resnet(A_, A_, A_, A_ ) __magic_name__ = checkpoint["""out.0.weight"""] __magic_name__ = checkpoint["""out.0.bias"""] __magic_name__ = checkpoint["""out.2.weight"""] __magic_name__ = checkpoint["""out.2.bias"""] return 
new_checkpoint if __name__ == "__main__": __lowerCAmelCase : Tuple = argparse.ArgumentParser() parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.') parser.add_argument( '--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.' ) parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.') __lowerCAmelCase : int = parser.parse_args() __lowerCAmelCase : int = strabool(args.class_cond) __lowerCAmelCase : Tuple = os.path.basename(args.unet_path) print(F'''Checkpoint: {ckpt_name}''') # Get U-Net config if "imagenet64" in ckpt_name: __lowerCAmelCase : int = IMAGENET_64_UNET_CONFIG elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): __lowerCAmelCase : Any = LSUN_256_UNET_CONFIG elif "test" in ckpt_name: __lowerCAmelCase : int = TEST_UNET_CONFIG else: raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''') if not args.class_cond: __lowerCAmelCase : str = None __lowerCAmelCase : Union[str, Any] = con_pt_to_diffuser(args.unet_path, unet_config) __lowerCAmelCase : Optional[Any] = UNetaDModel(**unet_config) image_unet.load_state_dict(converted_unet_ckpt) # Get scheduler config if "cd" in ckpt_name or "test" in ckpt_name: __lowerCAmelCase : str = CD_SCHEDULER_CONFIG elif "ct" in ckpt_name and "imagenet64" in ckpt_name: __lowerCAmelCase : List[str] = CT_IMAGENET_64_SCHEDULER_CONFIG elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)): __lowerCAmelCase : Optional[int] = CT_LSUN_256_SCHEDULER_CONFIG else: raise ValueError(F'''Checkpoint type {ckpt_name} is not currently supported.''') __lowerCAmelCase : Union[str, Any] = CMStochasticIterativeScheduler(**scheduler_config) __lowerCAmelCase : Dict = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler) consistency_model.save_pretrained(args.dump_path)
76
import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def a__ ( A_, A_ ): '''simple docstring''' assert isinstance(A_, A_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""", [False, True] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ = TextDatasetReader(A_, cache_dir=A_, keep_in_memory=A_ ).read() _check_text_dataset(A_, A_ ) @pytest.mark.parametrize( """features""", [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ], ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = features.copy() if features else default_expected_features __magic_name__ = ( Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ = TextDatasetReader(A_, features=A_, cache_dir=A_ ).read() _check_text_dataset(A_, A_ ) @pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_, split=A_ ).read() _check_text_dataset(A_, A_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""", [str, list] ) def a__ ( A_, A_, A_ ): '''simple docstring''' if issubclass(A_, A_ ): 
__magic_name__ = text_path elif issubclass(A_, A_ ): __magic_name__ = [text_path] __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_ ).read() _check_text_dataset(A_, A_ ) def a__ ( A_, A_, A_=("train",) ): '''simple docstring''' assert isinstance(A_, A_ ) for split in splits: __magic_name__ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""", [False, True] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ = TextDatasetReader({"""train""": text_path}, cache_dir=A_, keep_in_memory=A_ ).read() _check_text_datasetdict(A_, A_ ) @pytest.mark.parametrize( """features""", [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ], ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" __magic_name__ = {"""text""": """string"""} __magic_name__ = features.copy() if features else default_expected_features __magic_name__ = ( Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ = TextDatasetReader({"""train""": text_path}, features=A_, cache_dir=A_ ).read() _check_text_datasetdict(A_, A_ ) @pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] ) def a__ ( A_, A_, A_ ): '''simple docstring''' if split: __magic_name__ = {split: text_path} else: __magic_name__ = """train""" __magic_name__ = {"""train""": 
text_path, """test""": text_path} __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_ ).read() _check_text_datasetdict(A_, A_, splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
76
1
from __future__ import annotations import requests def a__ ( A_ ): '''simple docstring''' __magic_name__ = f'''https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty''' return requests.get(A_ ).json() def a__ ( A_ = 10 ): '''simple docstring''' __magic_name__ = """https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty""" __magic_name__ = requests.get(A_ ).json()[:max_stories] return [get_hackernews_story(A_ ) for story_id in story_ids] def a__ ( A_ = 10 ): '''simple docstring''' __magic_name__ = hackernews_top_stories(A_ ) return "\n".join("""* [{title}]({url})""".format(**A_ ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
76
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = ["""pixel_values"""] def __init__( self : str , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : List[Any] , ) -> None: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = size if size is not None else {"""shortest_edge""": 256} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ ) __magic_name__ = do_resize __magic_name__ = size __magic_name__ = resample __magic_name__ = do_center_crop __magic_name__ = crop_size __magic_name__ = do_rescale __magic_name__ = rescale_factor __magic_name__ = do_normalize __magic_name__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __magic_name__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowercase ( self : Any , UpperCamelCase__ 
: np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __magic_name__ = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__ ) return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : str , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ ) return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Any ) -> np.ndarray: """simple docstring""" return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : List[str] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ) -> np.ndarray: """simple docstring""" return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : 
ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : int , ) -> Dict: """simple docstring""" __magic_name__ = do_resize if do_resize is not None else self.do_resize __magic_name__ = size if size is not None else self.size __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = resample if resample is not None else self.resample __magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__ = crop_size if crop_size is not None else self.crop_size __magic_name__ = get_size_dict(UpperCamelCase__ ) __magic_name__ = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ = image_mean if image_mean is not None else self.image_mean __magic_name__ = image_std if image_std is not None else self.image_std __magic_name__ = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. __magic_name__ = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_resize: __magic_name__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images] if do_center_crop: __magic_name__ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images] if do_rescale: __magic_name__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images] if do_normalize: __magic_name__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images] __magic_name__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images] __magic_name__ = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
76
1
# # This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or # many nodes) can talk to each other via nccl and allocate gpu memory. # # To run first adjust the number of processes and nodes: # # python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port # # You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d # # use torch.distributed.launch instead of torch.distributed.run for torch < 1.9 # # If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with: # # NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py # # which should tell you what's going on behind the scenes. # # # This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that # runs on 2 nodes of 4 gpus per node: # # #SBATCH --job-name=test-nodes # name # #SBATCH --nodes=2 # nodes # #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node! 
# #SBATCH --cpus-per-task=10 # number of cores per tasks # #SBATCH --gres=gpu:4 # number of gpus # #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS) # #SBATCH --output=%x-%j.out # output file name # # GPUS_PER_NODE=4 # MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1) # MASTER_PORT=6000 # # srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \ # --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \ # --master_addr $MASTER_ADDR --master_port $MASTER_PORT \ # torch-distributed-gpu-test.py' # import fcntl import os import socket import torch import torch.distributed as dist def a__ ( *A_ ): '''simple docstring''' with open(A_, """r""" ) as fh: fcntl.flock(A_, fcntl.LOCK_EX ) try: print(*A_ ) finally: fcntl.flock(A_, fcntl.LOCK_UN ) __lowerCAmelCase : Tuple = int(os.environ['LOCAL_RANK']) torch.cuda.set_device(local_rank) __lowerCAmelCase : str = torch.device('cuda', local_rank) __lowerCAmelCase : int = socket.gethostname() __lowerCAmelCase : Union[str, Any] = F'''[{hostname}-{local_rank}]''' try: # test distributed dist.init_process_group('nccl') dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM) dist.barrier() # test cuda is available and can allocate memory torch.cuda.is_available() torch.ones(1).cuda(local_rank) # global rank __lowerCAmelCase : Optional[Any] = dist.get_rank() __lowerCAmelCase : Union[str, Any] = dist.get_world_size() printflock(F'''{gpu} is OK (global rank: {rank}/{world_size})''') dist.barrier() if rank == 0: printflock(F'''pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}''') except Exception: printflock(F'''{gpu} is broken''') raise
76
import math


def insertion_sort(array, start=0, end=0):
    """Sort ``array[start:end]`` in place with insertion sort and return the array.

    ``end=0`` (the default) means "up to the end of the array".
    """
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        # Shift larger elements one slot right until the held value fits.
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size):
    """Sift ``array[index]`` down so the subtree rooted at ``index`` is a max-heap."""
    largest = index
    left_index = 2 * index + 1  # Left child
    right_index = 2 * index + 2  # Right child
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array):
    """Sort ``array`` in place with heapsort and return it."""
    n = len(array)

    # Build a max-heap bottom-up.
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    # Repeatedly move the current max to the end and restore the heap.
    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array


def median_of_3(array, first_index, middle_index, last_index):
    """Return the median of the three indexed values (pivot selection)."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot):
    """Hoare-style partition of ``array[low:high]`` around ``pivot``.

    Returns the split point ``i``: elements left of ``i`` are <= pivot and
    elements from ``i`` onward are >= pivot.
    """
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    """Sort ``array`` with introsort (quicksort + heapsort + insertion sort).

    >>> sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
    [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]
    >>> sort([])
    []
    >>> sort([5])
    [5]
    """
    if len(array) == 0:
        return array
    # Fall back to heapsort once recursion exceeds 2*log2(n) levels, which
    # caps the worst case at O(n log n).
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    """Recursive introsort worker over ``array[start:end]``."""
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    # Ranges at or below the threshold are finished with insertion sort.
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
76
1
import math

import flax.linen as nn
import jax.numpy as jnp


def get_sinusoidal_embeddings(
    timesteps,
    embedding_dim,
    freq_shift=1,
    min_timescale=1,
    max_timescale=1.0e4,
    flip_sin_to_cos=False,
    scale=1.0,
):
    """Return sinusoidal timestep embeddings of shape ``(len(timesteps), embedding_dim)``.

    Args:
        timesteps: 1-D array of timestep values, one per batch element.
        embedding_dim: output embedding width; must be even (half sin, half cos).
        freq_shift: shift applied to the timescale denominator.
        min_timescale / max_timescale: frequency range of the sinusoids.
        flip_sin_to_cos: if True, emit ``[cos, sin]`` instead of ``[sin, cos]``.
        scale: multiplier applied to the phase before sin/cos.
    """
    assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
    assert embedding_dim % 2 == 0, f"Embedding dimension {embedding_dim} should be even"
    num_timescales = float(embedding_dim // 2)
    log_timescale_increment = math.log(max_timescale / min_timescale) / (num_timescales - freq_shift)
    inv_timescales = min_timescale * jnp.exp(jnp.arange(num_timescales, dtype=jnp.float32) * -log_timescale_increment)
    emb = jnp.expand_dims(timesteps, 1) * jnp.expand_dims(inv_timescales, 0)

    # scale embeddings
    scaled_time = scale * emb

    if flip_sin_to_cos:
        signal = jnp.concatenate([jnp.cos(scaled_time), jnp.sin(scaled_time)], axis=1)
    else:
        signal = jnp.concatenate([jnp.sin(scaled_time), jnp.cos(scaled_time)], axis=1)
    signal = jnp.reshape(signal, [jnp.shape(timesteps)[0], embedding_dim])
    return signal


class FlaxTimestepEmbedding(nn.Module):
    r"""Projects a timestep embedding through a two-layer SiLU MLP.

    Attributes:
        time_embed_dim (`int`, defaults to `32`): width of the two dense layers.
        dtype (defaults to `jnp.float32`): computation dtype.
    """

    time_embed_dim: int = 32
    dtype: jnp.dtype = jnp.float32

    @nn.compact
    def __call__(self, temb):
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_1")(temb)
        temb = nn.silu(temb)
        temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name="linear_2")(temb)
        return temb


class FlaxTimesteps(nn.Module):
    r"""Produces raw sinusoidal embeddings for a batch of timesteps.

    Attributes:
        dim (`int`, defaults to `32`): output embedding dimension.
        flip_sin_to_cos (`bool`, defaults to `False`): sin/cos ordering.
        freq_shift: frequency shift forwarded to `get_sinusoidal_embeddings`.
    """

    dim: int = 32
    flip_sin_to_cos: bool = False
    freq_shift: float = 1

    @nn.compact
    def __call__(self, timesteps):
        return get_sinusoidal_embeddings(
            timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift
        )
76
import argparse
import json
import re
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_v1_config(model_name):
    """Build a `MobileNetV1Config` from a checkpoint name such as 'mobilenet_v1_1.0_224'."""
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    # Checkpoint names encode <depth multiplier>_<image size>.
    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    # Shift every ImageNet label up by one to make room for "background" at 0.
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    """Download the standard COCO cats image used to sanity-check conversions."""
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak a TensorFlow MobileNetV1 checkpoint into the 🤗 format.

    Verifies the converted model's logits on a reference image before saving.
    """
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
76
1
import json
import re
from typing import TYPE_CHECKING, List, Optional, Tuple, Union

import numpy as np

from ...utils import is_tf_available, is_torch_available, logging


if TYPE_CHECKING:
    if is_torch_available():
        import torch
    if is_tf_available():
        import tensorflow as tf

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_codegen import CodeGenTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/vocab.json",
    },
    "merges_file": {
        "Salesforce/codegen-350M-mono": "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "Salesforce/codegen-350M-mono": (
            "https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/tokenizer.json"
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "Salesforce/codegen-350M-mono": 2048,
}


class CodeGenTokenizerFast(PreTrainedTokenizerFast):
    """
    Construct a "fast" CodeGen tokenizer, backed by HuggingFace's *tokenizers*
    library (byte-level Byte-Pair-Encoding).

    Args:
        vocab_file (`str`, *optional*): path to the vocabulary file.
        merges_file (`str`, *optional*): path to the merges file.
        tokenizer_file (`str`, *optional*): path to a serialized tokenizer.json.
        unk_token / bos_token / eos_token (`str`, default `"<|endoftext|>"`):
            special tokens.
        add_prefix_space (`bool`, defaults to `False`): whether to prepend a
            space to the input (treats the leading word like any other word).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CodeGenTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        # The fast backend cannot prepend a BOS token; refuse early with a
        # pointer at the slow tokenizer instead of silently mis-tokenizing.
        if kwargs.pop("add_bos_token", False):
            model_id = kwargs.pop("name_or_path", "")
            raise ValueError(
                "Currenty GPT2's fast tokenizer does NOT support adding a BOS token."
                "Instead you should use GPT2's slow tokenizer class `CodeGenTokenizer` as follows: \n"
                f"`CodeGenTokenizer.from_pretrained('{model_id}')`\nor\n"
                f"`AutoTokenizer.from_pretrained('{model_id}', use_fast=False)`\n"
                "This issue will be fixed soon, see: https://github.com/huggingface/tokenizers/pull/1005."
                " so that the fast tokenizer works correctly."
            )

        # Rebuild the backend pre-tokenizer if its add_prefix_space setting
        # disagrees with the requested one.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)

        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Save the backend model's vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def decode(
        self,
        token_ids: Union[int, List[int], "np.ndarray", "torch.Tensor", "tf.Tensor"],
        skip_special_tokens: bool = False,
        clean_up_tokenization_spaces: bool = None,
        truncate_before_pattern: Optional[List[str]] = None,
        **kwargs,
    ) -> str:
        """Decode ids to a string, optionally truncating at the first match of
        any regex in `truncate_before_pattern` (useful to cut off generated
        code at a natural boundary)."""
        decoded_text = super().decode(
            token_ids=token_ids,
            skip_special_tokens=skip_special_tokens,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

        if truncate_before_pattern is not None and len(truncate_before_pattern) > 0:
            decoded_text = self.truncate(decoded_text, truncate_before_pattern)

        return decoded_text

    def truncate(self, completion, truncate_before_pattern):
        """Cut `completion` before the second top-level `print`/`def` and
        before the earliest match of any pattern in `truncate_before_pattern`."""

        def find_re(string, pattern, start_pos):
            m = pattern.search(string, start_pos)
            return m.start() if m else -1

        terminals = [re.compile(pattern, re.MULTILINE) for pattern in truncate_before_pattern]

        prints = list(re.finditer("^print", completion, re.MULTILINE))

        if len(prints) > 1:
            completion = completion[: prints[1].start()]

        defs = list(re.finditer("^def", completion, re.MULTILINE))

        if len(defs) > 1:
            completion = completion[: defs[1].start()]

        start_pos = 0

        terminals_pos = [
            pos for pos in [find_re(completion, terminal, start_pos) for terminal in terminals] if pos != -1
        ]

        if len(terminals_pos) > 0:
            return completion[: min(terminals_pos)]
        else:
            return completion
76
import collections
import importlib.util
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"


# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Return the backend name guarded on `line` ("torch", "tf_and_torch", ...)
    or None if the line is not an `if not is_xxx_available()` guard."""
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """Read an __init__.py and return the objects defined in its two halves.

    Returns a pair of dicts `(import_dict_objects, type_hint_objects)`, each
    mapping a backend name (or "none") to the list of objects declared for it,
    or `None` if the init is not structured around `_import_structure`.
    """
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}
    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}
    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init and return a list of error strings."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f"Duplicate _import_structure definitions for: {duplicate_imports}")
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f"Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}")

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f"{key} backend"
            errors.append(f"Differences for {name}:")
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f"  {a} in TYPE_HINT but not in _import_structure.")
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f"  {a} in _import_structure but not in TYPE_HINT.")
    return errors


def check_all_inits():
    """Walk the transformers source tree and validate every structured init."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f"Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def check_submodules():
    """Check every transformers submodule is registered in the main init."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f"- {module}" for module in module_not_registered)
        raise ValueError(
            "The following submodules are not properly registered in the main init of Transformers:\n"
            f"{list_of_modules}\n"
            "Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."
        )


def get_transformers_submodules():
    """Return all module names (dotted paths) under the transformers package."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
76
1
import json
import os
from typing import Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

# Token ids of the CTRL control codes that condition generation.
CONTROL_CODES = {
    "Pregnancy": 168629,
    "Christianity": 7675,
    "Explain": 106423,
    "Fitness": 63440,
    "Saving": 63163,
    "Ask": 27171,
    "Ass": 95985,
    "Joke": 163509,
    "Questions": 45622,
    "Thoughts": 49605,
    "Retail": 52342,
    "Feminism": 164338,
    "Writing": 11992,
    "Atheism": 192263,
    "Netflix": 48616,
    "Computing": 39639,
    "Opinion": 43213,
    "Alone": 44967,
    "Funny": 58917,
    "Gaming": 40358,
    "Human": 4088,
    "India": 1331,
    "Joker": 77138,
    "Diet": 36206,
    "Legal": 11859,
    "Norman": 4939,
    "Tip": 72689,
    "Weight": 52343,
    "Movies": 46273,
    "Running": 23425,
    "Science": 2090,
    "Horror": 37793,
    "Confession": 60572,
    "Finance": 12250,
    "Politics": 16360,
    "Scary": 191985,
    "Support": 12654,
    "Technologies": 32516,
    "Teenage": 66160,
    "Event": 32769,
    "Learned": 67460,
    "Notion": 182770,
    "Wikipedia": 37583,
    "Books": 6665,
    "Extract": 76050,
    "Confessions": 102701,
    "Conspiracy": 75932,
    "Links": 63674,
    "Narcissus": 150425,
    "Relationship": 54766,
    "Relationships": 134796,
    "Reviews": 41671,
    "News": 4256,
    "Translation": 26820,
    "multilingual": 128406,
}


def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word`.

    `word` is represented as a tuple of symbols (variable-length strings).
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs


class CTRLTokenizer(PreTrainedTokenizer):
    """
    Construct a CTRL tokenizer, based on Byte-Pair Encoding.

    Args:
        vocab_file (`str`): path to the vocabulary file.
        merges_file (`str`): path to the merges file.
        unk_token (`str`, defaults to `"<unk>"`): the unknown token.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            # First line is a version header, last entry is an empty string.
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        """Apply BPE merges to a single whitespace-free token and return the
        merged sub-tokens joined by "@@ " (results are memoized)."""
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        # Mark the end of the word so end-of-word merges rank differently.
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # Merge the lowest-ranked (most frequent) known bigram first.
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        # Strip the trailing "</w>" end-of-word marker.
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        """Tokenize a string into BPE sub-tokens."""
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Convert a token (str) to an id using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Convert an index (int) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Convert a sequence of tokens back into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write vocab.json and merges.txt into `save_directory` and return
        their paths."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
76
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


__lowerCAmelCase : Optional[Any] = logging.get_logger(__name__)

__lowerCAmelCase : List[Any] = {
    'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class UpperCAmelCase_ ( _A ):
    """Configuration class for a SEW-D speech model (model_type "sew-d").

    NOTE(review): obfuscation renamed assignment targets to `__magic_name__`
    while attribute reads keep the original names (e.g. `self.conv_dim`);
    the intended targets are the matching `self.<name>` attributes.
    """

    a__ = """sew-d"""

    def __init__( self : List[str] , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : Optional[int]=768 , UpperCamelCase__ : Tuple=12 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : int=3072 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : Any=256 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : str=("p2c", "c2p") , UpperCamelCase__ : List[Any]="layer_norm" , UpperCamelCase__ : int="gelu_python" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : int=0.0 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[int]=1E-7 , UpperCamelCase__ : List[Any]=1E-5 , UpperCamelCase__ : List[str]="group" , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : Tuple=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , UpperCamelCase__ : str=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCamelCase__ : Optional[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Optional[int]=128 , UpperCamelCase__ : Tuple=16 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Dict=0.05 , UpperCamelCase__ : str=10 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Dict=10 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : List[Any]="mean" , UpperCamelCase__ : int=False , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : Optional[int]=256 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=2 , **UpperCamelCase__ : str , ) -> Dict:
        """Store model hyper-parameters and validate the convolutional layer spec."""
        super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
        __magic_name__ = hidden_size
        __magic_name__ = feat_extract_norm
        __magic_name__ = feat_extract_activation
        __magic_name__ = list(UpperCamelCase__ )
        __magic_name__ = list(UpperCamelCase__ )
        __magic_name__ = list(UpperCamelCase__ )
        __magic_name__ = conv_bias
        __magic_name__ = num_conv_pos_embeddings
        __magic_name__ = num_conv_pos_embedding_groups
        # Number of feature-extractor layers is derived from the conv spec.
        __magic_name__ = len(self.conv_dim )
        __magic_name__ = num_hidden_layers
        __magic_name__ = intermediate_size
        __magic_name__ = squeeze_factor
        __magic_name__ = max_position_embeddings
        __magic_name__ = position_buckets
        __magic_name__ = share_att_key
        __magic_name__ = relative_attention
        __magic_name__ = norm_rel_ebd
        __magic_name__ = list(UpperCamelCase__ )
        __magic_name__ = hidden_act
        __magic_name__ = num_attention_heads
        __magic_name__ = hidden_dropout
        __magic_name__ = attention_dropout
        __magic_name__ = activation_dropout
        __magic_name__ = feat_proj_dropout
        __magic_name__ = final_dropout
        __magic_name__ = layer_norm_eps
        __magic_name__ = feature_layer_norm_eps
        __magic_name__ = initializer_range
        __magic_name__ = vocab_size

        # The three conv specs must describe the same number of layers.
        if (
            (len(self.conv_stride ) != self.num_feat_extract_layers)
            or (len(self.conv_kernel ) != self.num_feat_extract_layers)
            or (len(self.conv_dim ) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                """Configuration for convolutional layers is incorrect."""
                """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
                F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
                F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.'''
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        __magic_name__ = apply_spec_augment
        __magic_name__ = mask_time_prob
        __magic_name__ = mask_time_length
        __magic_name__ = mask_time_min_masks
        __magic_name__ = mask_feature_prob
        __magic_name__ = mask_feature_length
        __magic_name__ = mask_feature_min_masks

        # ctc loss
        __magic_name__ = ctc_loss_reduction
        __magic_name__ = ctc_zero_infinity

        # sequence classification
        __magic_name__ = use_weighted_layer_sum
        __magic_name__ = classifier_proj_size

    @property
    def _lowercase ( self : Union[str, Any] ) -> str:
        """Product of the convolutional strides (reduce with multiplication, seed 1)."""
        return functools.reduce(operator.mul , self.conv_stride , 1 )
76
1
import argparse
import json
import os

import fairseq
import torch
from fairseq.data import Dictionary

from transformers import (
    HubertConfig,
    HubertForCTC,
    HubertModel,
    WavaVecaCTCTokenizer,
    WavaVecaFeatureExtractor,
    WavaVecaProcessor,
    logging,
)


logging.set_verbosity_info()
__lowerCAmelCase : int = logging.get_logger(__name__)

# fairseq parameter-name fragments -> Hugging Face parameter names;
# "*" is later substituted with the concrete encoder layer index.
__lowerCAmelCase : Dict = {
    'post_extract_proj': 'feature_projection.projection',
    'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
    'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
    'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
    'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
    'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
    'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
    'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
    'fc2': 'encoder.layers.*.feed_forward.output_dense',
    'final_layer_norm': 'encoder.layers.*.final_layer_norm',
    'encoder.layer_norm': 'encoder.layer_norm',
    'w2v_model.layer_norm': 'feature_projection.layer_norm',
    'w2v_encoder.proj': 'lm_head',
    'mask_emb': 'masked_spec_embed',
}


# NOTE(review): throughout this file, obfuscation renamed assignment targets to
# `__magic_name__` while reads keep the original local names (hf_pointer,
# hf_shape, unused_weights, ...), and all four functions were renamed to `a__`
# even though the call sites still use the original names (set_recursively,
# recursively_load_weights, load_conv_layer, convert_hubert_checkpoint).
# Restore from the upstream conversion script before running.
def a__ ( A_, A_, A_, A_, A_ ):
    """Walk the dotted `key` into the HF model and copy `value` onto the
    selected tensor slot (weight / weight_g / weight_v / bias), asserting shapes match."""
    for attribute in key.split(""".""" ):
        __magic_name__ = getattr(A_, A_ )
    if weight_type is not None:
        __magic_name__ = getattr(A_, A_ ).shape
    else:
        __magic_name__ = hf_pointer.shape
    assert hf_shape == value.shape, (
        f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
        f''' {value.shape} for {full_name}'''
    )
    if weight_type == "weight":
        __magic_name__ = value
    elif weight_type == "weight_g":
        __magic_name__ = value
    elif weight_type == "weight_v":
        __magic_name__ = value
    elif weight_type == "bias":
        __magic_name__ = value
    else:
        __magic_name__ = value
    logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )


def a__ ( A_, A_, A_ ):
    """Copy every tensor of the fairseq state dict into the HF model, routing
    conv-layer tensors through load_conv_layer and everything else via MAPPING."""
    __magic_name__ = []
    __magic_name__ = fairseq_model.state_dict()
    # Fine-tuned (CTC) models nest the base model under `.hubert`.
    __magic_name__ = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        __magic_name__ = False
        if "conv_layers" in name:
            load_conv_layer(
                A_, A_, A_, A_, hf_model.config.feat_extract_norm == """group""", )
            __magic_name__ = True
        else:
            for key, mapped_key in MAPPING.items():
                __magic_name__ = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
                if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
                    __magic_name__ = True
                    if "*" in mapped_key:
                        # Layer index sits just before the matched fragment.
                        __magic_name__ = name.split(A_ )[0].split(""".""" )[-2]
                        __magic_name__ = mapped_key.replace("""*""", A_ )
                    if "weight_g" in name:
                        __magic_name__ = """weight_g"""
                    elif "weight_v" in name:
                        __magic_name__ = """weight_v"""
                    elif "weight" in name:
                        __magic_name__ = """weight"""
                    elif "bias" in name:
                        __magic_name__ = """bias"""
                    else:
                        __magic_name__ = None
                    set_recursively(A_, A_, A_, A_, A_ )
                continue
            if not is_used:
                unused_weights.append(A_ )
    logger.warning(f'''Unused weights: {unused_weights}''' )


def a__ ( A_, A_, A_, A_, A_ ):
    """Copy one fairseq feature-extractor conv tensor into the HF feature extractor.

    type_id 0 selects the conv weight/bias; type_id 2 selects the layer norm
    (only present for the first layer under group norm). Anything else is
    recorded in unused_weights.
    """
    __magic_name__ = full_name.split("""conv_layers.""" )[-1]
    __magic_name__ = name.split(""".""" )
    __magic_name__ = int(items[0] )
    __magic_name__ = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
            )
            __magic_name__ = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
            )
            __magic_name__ = value
            logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
                " found."
            )
            __magic_name__ = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f'''{full_name} has size {value.shape}, but'''
                f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
            )
            __magic_name__ = value
            logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(A_ )


@torch.no_grad()
def a__ ( A_, A_, A_=None, A_=None, A_=True ):
    """Convert a fairseq Hubert checkpoint to a Hugging Face one.

    For fine-tuned models, also builds a CTC tokenizer/processor from the
    fairseq Dictionary and saves everything to the dump folder.
    """
    if config_path is not None:
        __magic_name__ = HubertConfig.from_pretrained(A_ )
    else:
        __magic_name__ = HubertConfig()
    if is_finetuned:
        if dict_path:
            __magic_name__ = Dictionary.load(A_ )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            __magic_name__ = target_dict.pad_index
            __magic_name__ = target_dict.bos_index
            __magic_name__ = target_dict.eos_index
            __magic_name__ = len(target_dict.symbols )
            __magic_name__ = os.path.join(A_, """vocab.json""" )
            if not os.path.isdir(A_ ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(A_ ) )
                return
            os.makedirs(A_, exist_ok=A_ )
            with open(A_, """w""", encoding="""utf-8""" ) as vocab_handle:
                json.dump(target_dict.indices, A_ )
            __magic_name__ = WavaVecaCTCTokenizer(
                A_, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token="""|""", do_lower_case=A_, )
            __magic_name__ = True if config.feat_extract_norm == """layer""" else False
            __magic_name__ = WavaVecaFeatureExtractor(
                feature_size=1, sampling_rate=16000, padding_value=0, do_normalize=A_, return_attention_mask=A_, )
            __magic_name__ = WavaVecaProcessor(feature_extractor=A_, tokenizer=A_ )
            processor.save_pretrained(A_ )
        __magic_name__ = HubertForCTC(A_ )
    else:
        __magic_name__ = HubertModel(A_ )
    if is_finetuned:
        # Fine-tuned checkpoints need the dictionary directory passed as `data`.
        __magic_name__ , __magic_name__ , __magic_name__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    else:
        __magic_name__ , __magic_name__ , __magic_name__ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
    __magic_name__ = model[0].eval()
    recursively_load_weights(A_, A_, A_ )
    hf_wavavec.save_pretrained(A_ )


if __name__ == "__main__":
    __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser()
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
    parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
    parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
    parser.add_argument(
        '--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
    )
    __lowerCAmelCase : Dict = parser.parse_args()
    convert_hubert_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
76
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return sigmoid(value), or the sigmoid derivative when ``deriv`` is True.

    When ``deriv`` is True, ``value`` is interpreted as an already-activated
    output s = sigmoid(x), whose derivative is s * (1 - s).

    >>> sigmoid_function(0)
    0.5
    >>> sigmoid_function(0.5, True)
    0.25
    """
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value (learning-rate-like scale used both for the input and the update)
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight by gradient descent so the one-neuron network's
    output approaches ``expected`` (given as a percentage in [0, 100]).

    Returns the final network output scaled back to the 0-100 range.
    (The random initial weight means results vary between runs unless the
    ``random`` module is seeded by the caller.)
    """
    # Random odd initial weight in [1, 199].
    weight = float(2 * (random.randint(1, 100)) - 1)

    # Defined up-front so the zero-iteration edge case returns 0.0 instead of
    # raising NameError.
    layer_1 = 0.0
    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta (gradient through the sigmoid)
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    expected = int(input('Expected value: '))
    number_propagations = int(input('Number of propagations: '))
    print(forward_propagation(expected, number_propagations))
76
1
import re
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class UpperCAmelCase_ ( _A ):
    """Processor pairing an image processor with a tokenizer, plus a parser that
    converts `<s_key>...</s_key>` token sequences into nested dicts/lists.

    NOTE(review): obfuscation renamed assignment targets to `__magic_name__`
    while reads keep the original names (images, text, encodings, ...);
    restore from the upstream processor before running.
    """

    a__ = ["""image_processor""", """tokenizer"""]
    a__ = """AutoImageProcessor"""
    a__ = """AutoTokenizer"""

    def __init__( self : List[Any] , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Dict=None , **UpperCamelCase__ : Tuple ) -> Union[str, Any]:
        """Accept the deprecated `feature_extractor` kwarg as an alias for `image_processor`."""
        __magic_name__ = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" , UpperCamelCase__ , )
            __magic_name__ = kwargs.pop("""feature_extractor""" )
        __magic_name__ = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("""You need to specify an `image_processor`.""" )
        if tokenizer is None:
            raise ValueError("""You need to specify a `tokenizer`.""" )
        super().__init__(UpperCamelCase__ , UpperCamelCase__ )
        __magic_name__ = self.image_processor
        __magic_name__ = False

    def __call__( self : List[Any] , *UpperCamelCase__ : Any , **UpperCamelCase__ : str ) -> Any:
        """Dispatch to the image processor and/or tokenizer; inside the
        `as_target_processor` context, forward everything to the current processor."""
        if self._in_target_context_manager:
            return self.current_processor(*UpperCamelCase__ , **UpperCamelCase__ )
        __magic_name__ = kwargs.pop("""images""" , UpperCamelCase__ )
        __magic_name__ = kwargs.pop("""text""" , UpperCamelCase__ )
        # Positional fallback: first arg is images, remainder are extra args.
        if len(UpperCamelCase__ ) > 0:
            __magic_name__ = args[0]
            __magic_name__ = args[1:]
        if images is None and text is None:
            raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
        if images is not None:
            __magic_name__ = self.image_processor(UpperCamelCase__ , *UpperCamelCase__ , **UpperCamelCase__ )
        if text is not None:
            __magic_name__ = self.tokenizer(UpperCamelCase__ , **UpperCamelCase__ )
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            # Both given: attach the tokenized labels to the image features.
            __magic_name__ = encodings["""input_ids"""]
            return inputs

    def _lowercase ( self : Tuple , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Any ) -> List[Any]:
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ )

    def _lowercase ( self : Optional[int] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : List[Any] ) -> Any:
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ )

    @contextmanager
    def _lowercase ( self : Tuple ) -> Dict:
        """Deprecated: temporarily swap the current processor to the tokenizer
        for label processing, restoring the image processor on exit."""
        warnings.warn(
            """`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
            """labels by using the argument `text` of the regular `__call__` method (either in the same call as """
            """your images inputs, or in a separate call.""" )
        __magic_name__ = True
        __magic_name__ = self.tokenizer
        yield
        __magic_name__ = self.image_processor
        __magic_name__ = False

    def _lowercase ( self : str , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Any=None ) -> Optional[Any]:
        """Recursively parse `<s_key>...</s_key>` spans in the token string into a
        nested dict; `<sep/>`-separated leaves become lists, and categorical
        special tokens `<x/>` listed in added_vocab are unwrapped to `x`."""
        if added_vocab is None:
            __magic_name__ = self.tokenizer.get_added_vocab()
        __magic_name__ = {}
        while tokens:
            __magic_name__ = re.search(R"""<s_(.*?)>""" , UpperCamelCase__ , re.IGNORECASE )
            if start_token is None:
                break
            __magic_name__ = start_token.group(1 )
            __magic_name__ = re.search(RF'''</s_{key}>''' , UpperCamelCase__ , re.IGNORECASE )
            __magic_name__ = start_token.group()
            if end_token is None:
                # Unclosed tag: drop it and keep scanning.
                __magic_name__ = tokens.replace(UpperCamelCase__ , """""" )
            else:
                __magic_name__ = end_token.group()
                __magic_name__ = re.escape(UpperCamelCase__ )
                __magic_name__ = re.escape(UpperCamelCase__ )
                __magic_name__ = re.search(F'''{start_token_escaped}(.*?){end_token_escaped}''' , UpperCamelCase__ , re.IGNORECASE )
                if content is not None:
                    __magic_name__ = content.group(1 ).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        __magic_name__ = self.tokenajson(UpperCamelCase__ , is_inner_value=UpperCamelCase__ , added_vocab=UpperCamelCase__ )
                        if value:
                            if len(UpperCamelCase__ ) == 1:
                                __magic_name__ = value[0]
                            __magic_name__ = value
                    else:  # leaf nodes
                        __magic_name__ = []
                        for leaf in content.split(R"""<sep/>""" ):
                            __magic_name__ = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                __magic_name__ = leaf[1:-2]  # for categorical special tokens
                            output[key].append(UpperCamelCase__ )
                        if len(output[key] ) == 1:
                            __magic_name__ = output[key][0]
                # Continue after the closing tag.
                __magic_name__ = tokens[tokens.find(UpperCamelCase__ ) + len(UpperCamelCase__ ) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:] , is_inner_value=UpperCamelCase__ , added_vocab=UpperCamelCase__ )
        if len(UpperCamelCase__ ):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def _lowercase ( self : Union[str, Any] ) -> Tuple:
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , UpperCamelCase__ , )
        return self.image_processor_class

    @property
    def _lowercase ( self : Union[str, Any] ) -> Tuple:
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , UpperCamelCase__ , )
        return self.image_processor
76
import os
import sys


# Make the in-repo `src/` package importable when this file is loaded as a
# torch.hub entry-point module.
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)


# torch.hub reads this module-level list to install prerequisites before
# invoking any entry point.
dependencies = [
    'torch',
    'numpy',
    'tokenizers',
    'filelock',
    'requests',
    'tqdm',
    'regex',
    'sentencepiece',
    'sacremoses',
    'importlib_metadata',
    'huggingface_hub',
]


# Each function below is a torch.hub entry point that simply delegates to the
# corresponding Auto* class; the original file defined all seven under the
# same name `a__`, so each definition shadowed the previous one.

@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    """Hub entry point for AutoConfig.from_pretrained."""
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    """Hub entry point for AutoTokenizer.from_pretrained."""
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    """Hub entry point for AutoModel.from_pretrained."""
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    """Hub entry point for AutoModelForCausalLM.from_pretrained."""
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    """Hub entry point for AutoModelForMaskedLM.from_pretrained."""
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    """Hub entry point for AutoModelForSequenceClassification.from_pretrained."""
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    """Hub entry point for AutoModelForQuestionAnswering.from_pretrained."""
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
76
1
import math
import unittest

from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        BioGptForCausalLM,
        BioGptForSequenceClassification,
        BioGptForTokenClassification,
        BioGptModel,
        BioGptTokenizer,
    )
    from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST


# NOTE(review): obfuscation renamed assignment targets to `__magic_name__`
# while reads keep the original names (self.batch_size, input_ids, ...);
# restore the original targets before running these tests.
class UpperCAmelCase_ :
    """Builds small BioGPT configs/inputs and runs per-feature model checks."""

    def __init__( self : int , UpperCamelCase__ : str , UpperCamelCase__ : List[str]=13 , UpperCamelCase__ : Optional[int]=7 , UpperCamelCase__ : str=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : List[str]=False , UpperCamelCase__ : int=True , UpperCamelCase__ : Any=99 , UpperCamelCase__ : List[Any]=32 , UpperCamelCase__ : int=5 , UpperCamelCase__ : Optional[Any]=4 , UpperCamelCase__ : Tuple=37 , UpperCamelCase__ : Dict="gelu" , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Tuple=512 , UpperCamelCase__ : Optional[int]=16 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Tuple=0.02 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : List[str]=4 , UpperCamelCase__ : str=None , ) -> Any:
        """Record the (tiny) model/test hyper-parameters used by every check."""
        __magic_name__ = parent
        __magic_name__ = batch_size
        __magic_name__ = seq_length
        __magic_name__ = is_training
        __magic_name__ = use_input_mask
        __magic_name__ = use_token_type_ids
        __magic_name__ = use_labels
        __magic_name__ = vocab_size
        __magic_name__ = hidden_size
        __magic_name__ = num_hidden_layers
        __magic_name__ = num_attention_heads
        __magic_name__ = intermediate_size
        __magic_name__ = hidden_act
        __magic_name__ = hidden_dropout_prob
        __magic_name__ = attention_probs_dropout_prob
        __magic_name__ = max_position_embeddings
        __magic_name__ = type_vocab_size
        __magic_name__ = type_sequence_label_size
        __magic_name__ = initializer_range
        __magic_name__ = num_labels
        __magic_name__ = num_choices
        __magic_name__ = scope

    def _lowercase ( self : Union[str, Any] ) -> Tuple:
        """Build a config plus random ids/masks/labels for one forward pass."""
        __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __magic_name__ = None
        if self.use_input_mask:
            __magic_name__ = random_attention_mask([self.batch_size, self.seq_length] )
        __magic_name__ = None
        if self.use_token_type_ids:
            __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __magic_name__ = None
        __magic_name__ = None
        __magic_name__ = None
        if self.use_labels:
            __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
        __magic_name__ = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _lowercase ( self : List[Any] ) -> Dict:
        """Build a BioGptConfig from the stored hyper-parameters."""
        return BioGptConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )

    def _lowercase ( self : str , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int ) -> str:
        """Check the base model's output hidden-state shape."""
        __magic_name__ = BioGptModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )
        __magic_name__ = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _lowercase ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , ) -> Union[str, Any]:
        """Check the causal-LM head's logits shape with labels provided."""
        __magic_name__ = BioGptForCausalLM(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _lowercase ( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] , *UpperCamelCase__ : Dict ) -> Tuple:
        """Check that cached (past_key_values) decoding matches a full forward pass
        when part of the input was masked."""
        __magic_name__ = BioGptModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        # create attention mask
        __magic_name__ = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCamelCase__ )
        __magic_name__ = self.seq_length // 2
        __magic_name__ = 0
        # first forward pass
        __magic_name__ , __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ ).to_tuple()
        # create hypothetical next token and extent to next_input_ids
        __magic_name__ = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # change a random masked slice from input_ids
        __magic_name__ = ids_tensor((1,) , UpperCamelCase__ ).item() + 1
        __magic_name__ = ids_tensor((self.batch_size, 1) , config.vocab_size ).squeeze(-1 )
        __magic_name__ = random_other_next_tokens
        # append to next input_ids and attn_mask
        __magic_name__ = torch.cat([input_ids, next_tokens] , dim=-1 )
        __magic_name__ = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1) , dtype=torch.long , device=UpperCamelCase__ )] , dim=1 , )
        # get two different outputs
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )["""last_hidden_state"""]
        __magic_name__ = model(UpperCamelCase__ , past_key_values=UpperCamelCase__ , attention_mask=UpperCamelCase__ )["""last_hidden_state"""]
        # select random slice
        __magic_name__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        __magic_name__ = output_from_no_past[:, -1, random_slice_idx].detach()
        __magic_name__ = output_from_past[:, 0, random_slice_idx].detach()
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 ) )

    def _lowercase ( self : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , *UpperCamelCase__ : List[Any] ) -> Any:
        """Check cached decoding equals the uncached path for multi-token continuations."""
        __magic_name__ = BioGptModel(config=UpperCamelCase__ ).to(UpperCamelCase__ ).eval()
        __magic_name__ = torch.ones(input_ids.shape , dtype=torch.long , device=UpperCamelCase__ )
        # first forward pass
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , use_cache=UpperCamelCase__ )
        __magic_name__ , __magic_name__ = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        __magic_name__ = ids_tensor((self.batch_size, 3) , config.vocab_size )
        __magic_name__ = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        __magic_name__ = torch.cat([input_ids, next_tokens] , dim=-1 )
        __magic_name__ = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ )["""last_hidden_state"""]
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , past_key_values=UpperCamelCase__ )[
            """last_hidden_state"""
        ]
        # select random slice
        __magic_name__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        __magic_name__ = output_from_no_past[:, -3:, random_slice_idx].detach()
        __magic_name__ = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 ) )

    def _lowercase ( self : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] , *UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Dict=False ) -> Tuple:
        """Check loss/logits shapes and that backward() runs (optionally with
        gradient checkpointing enabled)."""
        __magic_name__ = BioGptForCausalLM(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()
        __magic_name__ = model(UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.loss.shape , () )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        result.loss.backward()

    def _lowercase ( self : List[Any] , UpperCamelCase__ : Union[str, Any] , *UpperCamelCase__ : Any ) -> Optional[Any]:
        """Check that c_proj weights are initialized with the scaled std/zero mean."""
        __magic_name__ = BioGptModel(UpperCamelCase__ )
        __magic_name__ = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers )
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key] ) - model_std ) , 0.001 )
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key] ) - 0.0 ) , 0.01 )

    def _lowercase ( self : str , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : List[Any] , *UpperCamelCase__ : Optional[int] ) -> Dict:
        """Check the token-classification head's logits shape."""
        __magic_name__ = self.num_labels
        __magic_name__ = BioGptForTokenClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _lowercase ( self : Dict ) -> List[Any]:
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        __magic_name__ = self.prepare_config_and_inputs()
        (
            (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) ,
        ) = config_and_inputs
        __magic_name__ = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict


@require_torch
class UpperCAmelCase_ ( _A , _A , _A , unittest.TestCase ):
    """unittest suite wiring the tester above into the common model-test mixins."""

    a__ = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    a__ = (BioGptForCausalLM,) if is_torch_available() else ()
    a__ = (
        {
            """feature-extraction""": BioGptModel,
            """text-classification""": BioGptForSequenceClassification,
            """text-generation""": BioGptForCausalLM,
            """token-classification""": BioGptForTokenClassification,
            """zero-shot""": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    a__ = False

    def _lowercase ( self : List[Any] ) -> List[str]:
        """Set up the model tester and config tester (hidden_size=37)."""
        __magic_name__ = BioGptModelTester(self )
        __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )

    def _lowercase ( self : Dict ) -> Tuple:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def _lowercase ( self : Optional[int] ) -> str:
        """Base-model forward shape check."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
        """Repeat the model check for each position-embedding type."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __magic_name__ = type
            self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def _lowercase ( self : int ) -> Optional[int]:
        """Cached decoding with a modified masked slice."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*UpperCamelCase__ )

    def _lowercase ( self : List[Any] ) -> Optional[int]:
        """Forward/backward with gradient checkpointing."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*UpperCamelCase__ , gradient_checkpointing=UpperCamelCase__ )

    def _lowercase ( self : str ) -> Dict:
        """Cached decoding for multi-token continuations."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*UpperCamelCase__ )

    def _lowercase ( self : Tuple ) -> int:
        """Weight-initialization statistics."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*UpperCamelCase__ )

    def _lowercase ( self : List[str] ) -> Union[str, Any]:
        """Token-classification head shape."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*UpperCamelCase__ )

    @slow
    def _lowercase ( self : Optional[Any] ) -> Any:
        """Batched generation with left padding must match unpadded generation."""
        __magic_name__ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
        model.to(UpperCamelCase__ )
        __magic_name__ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
        __magic_name__ = """left"""
        # Define PAD Token = EOS Token = 50256
        __magic_name__ = tokenizer.eos_token
        __magic_name__ = model.config.eos_token_id
        # use different length sentences to test batching
        __magic_name__ = [
            """Hello, my dog is a little""",
            """Today, I""",
        ]
        __magic_name__ = tokenizer(UpperCamelCase__ , return_tensors="""pt""" , padding=UpperCamelCase__ )
        __magic_name__ = inputs["""input_ids"""].to(UpperCamelCase__ )
        __magic_name__ = model.generate(
            input_ids=UpperCamelCase__ , attention_mask=inputs["""attention_mask"""].to(UpperCamelCase__ ) , )
        __magic_name__ = tokenizer(sentences[0] , return_tensors="""pt""" ).input_ids.to(UpperCamelCase__ )
        __magic_name__ = model.generate(input_ids=UpperCamelCase__ )
        __magic_name__ = inputs_non_padded.shape[-1] - inputs["""attention_mask"""][-1].long().sum().cpu().item()
        __magic_name__ = tokenizer(sentences[1] , return_tensors="""pt""" ).input_ids.to(UpperCamelCase__ )
        __magic_name__ = model.generate(input_ids=UpperCamelCase__ , max_length=model.config.max_length - num_paddings )
        __magic_name__ = tokenizer.batch_decode(UpperCamelCase__ , skip_special_tokens=UpperCamelCase__ )
        __magic_name__ = tokenizer.decode(output_non_padded[0] , skip_special_tokens=UpperCamelCase__ )
        __magic_name__ = tokenizer.decode(output_padded[0] , skip_special_tokens=UpperCamelCase__ )
        __magic_name__ = [
            """Hello, my dog is a little bit bigger than a little bit.""",
            """Today, I have a good idea of how to use the information""",
        ]
        self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
        self.assertListEqual(UpperCamelCase__ , [non_padded_sentence, padded_sentence] )

    @slow
    def _lowercase ( self : Tuple ) -> Optional[int]:
        """Smoke-test from_pretrained on the first archive entry."""
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __magic_name__ = BioGptModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )

    def _lowercase ( self : Dict ) -> Tuple:
        """Sequence classification (single-label) logits shape."""
        __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
        __magic_name__ = 3
        __magic_name__ = input_dict["""input_ids"""]
        __magic_name__ = input_ids.ne(1 ).to(UpperCamelCase__ )
        __magic_name__ = ids_tensor([self.model_tester.batch_size] , self.model_tester.type_sequence_label_size )
        __magic_name__ = BioGptForSequenceClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )

    def _lowercase ( self : Tuple ) -> Tuple:
        """Sequence classification (multi-label) logits shape."""
        __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common()
        __magic_name__ = 3
        __magic_name__ = """multi_label_classification"""
        __magic_name__ = input_dict["""input_ids"""]
        __magic_name__ = input_ids.ne(1 ).to(UpperCamelCase__ )
        __magic_name__ = ids_tensor(
            [self.model_tester.batch_size, config.num_labels] , self.model_tester.type_sequence_label_size ).to(torch.float )
        __magic_name__ = BioGptForSequenceClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.assertEqual(result.logits.shape , (self.model_tester.batch_size, self.model_tester.num_labels) )


@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
    """Integration tests against the released microsoft/biogpt checkpoint."""

    @slow
    def _lowercase ( self : int ) -> Optional[Any]:
        """Pin the LM-head logits shape and a 3x3 slice of reference values."""
        __magic_name__ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
        __magic_name__ = torch.tensor([[2, 4805, 9, 656, 21]] )
        __magic_name__ = model(UpperCamelCase__ )[0]
        __magic_name__ = 4_2384
        __magic_name__ = torch.Size((1, 5, vocab_size) )
        self.assertEqual(output.shape , UpperCamelCase__ )
        __magic_name__ = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )

    @slow
    def _lowercase ( self : List[str] ) -> int:
        """Seeded generation smoke test (continues past this chunk)."""
        __magic_name__ = BioGptTokenizer.from_pretrained("""microsoft/biogpt""" )
        __magic_name__ = BioGptForCausalLM.from_pretrained("""microsoft/biogpt""" )
        model.to(UpperCamelCase__ )
        torch.manual_seed(0 )
        __magic_name__ = tokenizer("""COVID-19 is""" , return_tensors="""pt"""
).to(UpperCamelCase__ ) __magic_name__ = model.generate( **UpperCamelCase__ , min_length=100 , max_length=1024 , num_beams=5 , early_stopping=UpperCamelCase__ , ) __magic_name__ = tokenizer.decode(output_ids[0] , skip_special_tokens=UpperCamelCase__ ) __magic_name__ = ( """COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the""" """ causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and""" """ territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),""" """ and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and""" """ more than 800,000 deaths.""" ) self.assertEqual(UpperCamelCase__ , UpperCamelCase__ )
76
from typing import Dict

from .base import GenericTensor, Pipeline


class UpperCAmelCase_(Pipeline):
    """
    Feature-extraction pipeline: tokenizes the input text, runs the base model,
    and returns the raw hidden states (no task-specific head applied).

    Fixes vs. previous revision: the class inherited an undefined name `_A`
    (the imported `Pipeline` base was never used) and all four pipeline hooks
    shared one name, so the `Pipeline.__call__` machinery could never dispatch
    to them. Hooks are restored to the names the base class expects.
    """

    def _sanitize_parameters(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        """Split call-time kwargs into (preprocess, forward, postprocess) parameter dicts."""
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        # This pipeline takes no forward-time parameters.
        return preprocess_params, {}, postprocess_params

    def preprocess(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        """Tokenize `inputs` into tensors native to the active framework."""
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _forward(self, model_inputs):
        """Run the model on the tokenized inputs and return its raw outputs."""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, return_tensors=False):
        """Return the first output tensor, either as-is or converted to nested Python lists."""
        # model_outputs[0] is the first available tensor (e.g. last_hidden_state).
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        """Delegate to `Pipeline.__call__`, which dispatches to the hooks above."""
        return super().__call__(*args, **kwargs)
76
1
from typing import TYPE_CHECKING

from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Map of submodule name -> public names, consumed lazily by _LazyModule.
# (Previous revision assigned this to a throwaway name while later code read
# `_import_structure`, which was never defined — a NameError at import time.)
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Torch missing: expose only the config; modeling classes stay unregistered.
    pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_mmbt import MMBTConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings

else:
    import sys

    # Replace this module with a lazy proxy so submodules load on first access.
    # (Previous revision dropped the proxy into a local variable, leaving the
    # module itself unpatched and the lazy loading inert.)
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
76
"""Convert a dance-diffusion (audio_diffusion) checkpoint to a diffusers DanceDiffusionPipeline.

Fixes vs. previous revision: every function was defined under the placeholder
name `a__` while call sites used the real names (NameError); module constants
were bound to throwaway names while code read `MODELS_MAP` etc.; `torch.atana`
is not a torch API (should be `torch.atan2`); `UNetaDModel` is not a diffusers
name (should be `UNet1DModel`).
"""
import argparse
import math
import os
from copy import deepcopy

import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel


# Official checkpoints: download URL plus the sampling geometry each was trained with.
MODELS_MAP = {
    "gwf-440k": {
        "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-small-190k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt",
        "sample_rate": 48000,
        "sample_size": 65536,
    },
    "jmann-large-580k": {
        "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt",
        "sample_rate": 48000,
        "sample_size": 131072,
    },
    "maestro-uncond-150k": {
        "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "unlocked-uncond-250k": {
        "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
    "honk-140k": {
        "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt",
        "sample_rate": 16000,
        "sample_size": 65536,
    },
}


def alpha_sigma_to_t(alpha, sigma):
    """Return the timestep t in [0, 1] for clean-signal scale `alpha` and noise scale `sigma`."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    """Map a linear t schedule onto the 'crash' noise schedule used at training time."""
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    """Bare attribute holder used as a minimal model config."""

    pass


class DiffusionUncond(nn.Module):
    """Wrapper matching the original training module layout, so the checkpoint's state_dict loads."""

    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        # EMA copy of the weights — this is the copy actually used for sampling.
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    """Fetch an official checkpoint into the working directory; return its local path."""
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"


# Original layer index -> diffusers sub-layer name, by block position.
DOWN_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
}
UP_NUM_TO_LAYER = {
    "8": "resnets.0",
    "9": "attentions.0",
    "10": "resnets.1",
    "11": "attentions.1",
    "12": "resnets.2",
    "13": "attentions.2",
}
MID_NUM_TO_LAYER = {
    "1": "resnets.0",
    "2": "attentions.0",
    "3": "resnets.1",
    "4": "attentions.1",
    "5": "resnets.2",
    "6": "attentions.2",
    "8": "resnets.3",
    "9": "attentions.3",
    "10": "resnets.4",
    "11": "attentions.4",
    "12": "resnets.5",
    "13": "attentions.5",
}
DEPTH_0_TO_LAYER = {
    "0": "resnets.0",
    "1": "resnets.1",
    "2": "resnets.2",
    "4": "resnets.0",
    "5": "resnets.1",
    "6": "resnets.2",
}

RES_CONV_MAP = {
    "skip": "conv_skip",
    "main.0": "conv_1",
    "main.1": "group_norm_1",
    "main.3": "conv_2",
    "main.4": "group_norm_2",
}

ATTN_MAP = {
    "norm": "group_norm",
    "qkv_proj": ["query", "key", "value"],
    "out_proj": ["proj_attn"],
}


def convert_resconv_naming(name):
    """Rename one ResConvBlock parameter path into diffusers naming."""
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    """Rename one attention parameter path; fused qkv paths expand to a list of names."""
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")


def rename(input_string, max_depth=13):
    """Translate one original state_dict key into the diffusers UNet1DModel key layout.

    Returns a string, or a list of strings when a fused qkv weight expands into
    several diffusers parameters.
    """
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    # Each nested "main.7." level is one step deeper into the U-Net.
    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")
    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left)

    new_string_left = string_left
    if not isinstance(new_string_left, list):
        new_string = prefix + "." + new_layer + "." + new_string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in new_string_left]
    return new_string


def rename_orig_weights(state_dict):
    """Build a diffusers-keyed state_dict from the original one."""
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)

        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    """Split/reshape a 1x1-conv attention weight `v` into the linear weights named by `new_k`."""
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight: drop the trailing conv kernel dim of size 1
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices: the fused weight stacks q, k, v along dim 0
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict


def main(args):
    """Convert the checkpoint at args.model_path, verify sampling parity, and optionally save."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    # Sample from the EMA weights, as the original repo does.
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    # Sample with the original IPLMS sampler on the same noise/schedule for comparison.
    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)

    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
    parser.add_argument(
        "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
    )
    parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
    args = parser.parse_args()
    main(args)
76
1
from __future__ import annotations import random import unittest from transformers import TransfoXLConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLModel, ) class UpperCAmelCase_ : '''simple docstring''' def __init__( self : Tuple , UpperCamelCase__ : Dict , ) -> List[Any]: """simple docstring""" __magic_name__ = parent __magic_name__ = 13 __magic_name__ = 7 __magic_name__ = 30 __magic_name__ = self.seq_length + self.mem_len __magic_name__ = 15 __magic_name__ = True __magic_name__ = True __magic_name__ = 99 __magic_name__ = [10, 50, 80] __magic_name__ = 32 __magic_name__ = 32 __magic_name__ = 4 __magic_name__ = 8 __magic_name__ = 128 __magic_name__ = 2 __magic_name__ = 2 __magic_name__ = None __magic_name__ = 1 __magic_name__ = 0 __magic_name__ = 3 __magic_name__ = self.vocab_size - 1 __magic_name__ = 0.01 def _lowercase ( self : Any ) -> Tuple: """simple docstring""" __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ = None if self.use_labels: __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __magic_name__ = TransfoXLConfig( vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , 
num_labels=self.num_labels , ) return (config, input_ids_a, input_ids_a, lm_labels) def _lowercase ( self : Any ) -> Tuple: """simple docstring""" random.seed(self.seed ) tf.random.set_seed(self.seed ) def _lowercase ( self : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str ) -> Optional[int]: """simple docstring""" __magic_name__ = TFTransfoXLModel(UpperCamelCase__ ) __magic_name__ , __magic_name__ = model(UpperCamelCase__ ).to_tuple() __magic_name__ = {"""input_ids""": input_ids_a, """mems""": mems_a} __magic_name__ , __magic_name__ = model(UpperCamelCase__ ).to_tuple() self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def _lowercase ( self : Tuple , UpperCamelCase__ : Dict , UpperCamelCase__ : str , UpperCamelCase__ : Any , UpperCamelCase__ : Optional[Any] ) -> str: """simple docstring""" __magic_name__ = TFTransfoXLLMHeadModel(UpperCamelCase__ ) __magic_name__ , __magic_name__ = model(UpperCamelCase__ ).to_tuple() __magic_name__ = {"""input_ids""": input_ids_a, """labels""": lm_labels} __magic_name__ , __magic_name__ = model(UpperCamelCase__ ).to_tuple() __magic_name__ , __magic_name__ = model([input_ids_a, mems_a] ).to_tuple() __magic_name__ = {"""input_ids""": input_ids_a, """mems""": mems_a, """labels""": lm_labels} __magic_name__ , __magic_name__ = model(UpperCamelCase__ ).to_tuple() self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, 
self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertListEqual( [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , ) def _lowercase ( self : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] ) -> str: """simple docstring""" __magic_name__ = TFTransfoXLForSequenceClassification(UpperCamelCase__ ) __magic_name__ = model(UpperCamelCase__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def _lowercase ( self : List[Any] ) -> int: """simple docstring""" __magic_name__ = self.prepare_config_and_inputs() ((__magic_name__) , (__magic_name__) , (__magic_name__) , (__magic_name__)) = config_and_inputs __magic_name__ = {"""input_ids""": input_ids_a} return config, inputs_dict @require_tf class UpperCAmelCase_ ( _A , _A , unittest.TestCase ): '''simple docstring''' a__ = ( (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else () ) a__ = () if is_tf_available() else () a__ = ( { """feature-extraction""": TFTransfoXLModel, """text-classification""": TFTransfoXLForSequenceClassification, """text-generation""": TFTransfoXLLMHeadModel, """zero-shot""": TFTransfoXLForSequenceClassification, } if is_tf_available() else {} ) # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented a__ = False a__ = False a__ = False a__ = False def _lowercase ( self : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : str , UpperCamelCase__ : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Union[str, Any] ) -> List[Any]: """simple docstring""" if pipeline_test_casse_name == "TextGenerationPipelineTests": # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`. 
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple # tokenizer. return True return False def _lowercase ( self : List[Any] ) -> Any: """simple docstring""" __magic_name__ = TFTransfoXLModelTester(self ) __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , d_embed=37 ) def _lowercase ( self : Dict ) -> Tuple: """simple docstring""" self.config_tester.run_common_tests() def _lowercase ( self : List[Any] ) -> Tuple: """simple docstring""" self.model_tester.set_seed() __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_model(*UpperCamelCase__ ) def _lowercase ( self : Any ) -> Optional[int]: """simple docstring""" self.model_tester.set_seed() __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_lm_head(*UpperCamelCase__ ) def _lowercase ( self : Any ) -> Optional[Any]: """simple docstring""" __magic_name__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*UpperCamelCase__ ) def _lowercase ( self : Any ) -> Tuple: """simple docstring""" __magic_name__ , __magic_name__ = self.model_tester.prepare_config_and_inputs_for_common() __magic_name__ = [TFTransfoXLForSequenceClassification] for model_class in self.all_model_classes: __magic_name__ = model_class(UpperCamelCase__ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class in list_other_models_with_output_ebd: __magic_name__ = model.get_output_embeddings() assert isinstance(UpperCamelCase__ , tf.keras.layers.Layer ) __magic_name__ = model.get_bias() assert name is None else: __magic_name__ = model.get_output_embeddings() assert x is None __magic_name__ = model.get_bias() assert name is None def _lowercase ( self : Tuple ) -> Optional[Any]: """simple docstring""" pass @slow def _lowercase ( self : Dict ) -> int: """simple docstring""" for model_name in 
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __magic_name__ = TFTransfoXLModel.from_pretrained(UpperCamelCase__ ) self.assertIsNotNone(UpperCamelCase__ ) @unittest.skip(reason="""This model doesn't play well with fit() due to not returning a single loss.""" ) def _lowercase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" pass @require_tf class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' @unittest.skip("""Skip test until #12651 is resolved.""" ) @slow def _lowercase ( self : str ) -> List[Any]: """simple docstring""" __magic_name__ = TFTransfoXLLMHeadModel.from_pretrained("""transfo-xl-wt103""" ) # fmt: off __magic_name__ = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231 # fmt: on # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . 
Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> # fmt: off __magic_name__ = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231 # fmt: on # In 1991, the remains of Russian Tsar Nicholas II and his family ( # except for Alexei and Maria ) are discovered. The voice of young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story. # 1883 Western Siberia, a young Grigori Rasputin is asked by his father # and a group of men to perform magic. Rasputin has a vision and # denounces one of the men as a horse thief. Although his father initially # slaps him for making such an accusation, Rasputin watches as the man # is chased outside and beaten. Twenty years later, Rasputin sees a vision # of the Virgin Mary, prompting him to become a priest. # Rasputin quickly becomes famous, with people, even a bishop, begging for # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar # Nicholas II and his family were discovered. The voice of <unk> young son, # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos> __magic_name__ = model.generate(UpperCamelCase__ , max_length=200 , do_sample=UpperCamelCase__ ) self.assertListEqual(output_ids[0].numpy().tolist() , UpperCamelCase__ )
76
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LILT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "SCUT-DLVCLab/lilt-roberta-en-base": (
        "https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"
    ),
}


class UpperCAmelCase_(PretrainedConfig):
    """
    Configuration for the LiLT (Language-independent Layout Transformer) model.

    Fixes vs. previous revision: the class inherited an undefined name `_A`
    although `PretrainedConfig` was imported, and `__init__` declared every
    parameter with the same name (a SyntaxError) while assigning all attributes
    to a single throwaway local. Parameter names are restored from the attribute
    assignments; defaults and their order are unchanged, so positional callers
    are unaffected.
    """

    model_type = "lilt"

    def __init__(
        self,
        vocab_size: int = 30522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        pad_token_id: int = 0,
        position_embedding_type: str = "absolute",
        classifier_dropout=None,
        channel_shrink_ratio: int = 4,
        max_2d_position_embeddings: int = 1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
76
1
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function,
    starting_point,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
):
    """Find a root of `function` (a sympy expression string) via Newton-Raphson.

    Fixes vs. previous revision: the function was defined under a placeholder
    name while `__main__` called `newton_raphson`, and all five parameters
    shared one name (a SyntaxError). Names are restored from the call sites;
    the positional/keyword interface (`variable=`, `precision=`) is unchanged.

    :param function: expression in `variable` whose root is sought, e.g. "sin(x)"
    :param starting_point: initial guess (may be complex)
    :param variable: name of the free symbol in `function`
    :param precision: stop when successive guesses differ by less than this
    :param multiplicity: root multiplicity, used to accelerate convergence
    :raises ZeroDivisionError: if the derivative vanishes at the current guess
    """
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses.
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess


# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}")
    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )
    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )
    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
76
import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class UpperCAmelCase_ : '''simple docstring''' a__ = None def _lowercase ( self : Optional[int] ) -> str: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) __magic_name__ = json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __magic_name__ = os.path.join(UpperCamelCase__ , """feat_extract.json""" ) feat_extract_first.to_json_file(UpperCamelCase__ ) __magic_name__ = self.feature_extraction_class.from_json_file(UpperCamelCase__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def _lowercase ( self : str ) -> str: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __magic_name__ = feat_extract_first.save_pretrained(UpperCamelCase__ )[0] check_json_file_has_correct_format(UpperCamelCase__ ) __magic_name__ = self.feature_extraction_class.from_pretrained(UpperCamelCase__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def _lowercase ( self : Optional[int] ) -> Tuple: """simple docstring""" __magic_name__ = self.feature_extraction_class() self.assertIsNotNone(UpperCamelCase__ )
76
1
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
76
from ..utils import DummyObject, requires_backends class UpperCAmelCase_ ( metaclass=_A ): '''simple docstring''' a__ = ["""note_seq"""] def __init__( self : Any , *UpperCamelCase__ : str , **UpperCamelCase__ : List[Any] ) -> Optional[int]: """simple docstring""" requires_backends(self , ["""note_seq"""] ) @classmethod def _lowercase ( cls : str , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Tuple ) -> Dict: """simple docstring""" requires_backends(cls , ["""note_seq"""] ) @classmethod def _lowercase ( cls : List[str] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Tuple ) -> int: """simple docstring""" requires_backends(cls , ["""note_seq"""] )
76
1
class UpperCAmelCase_ : '''simple docstring''' def __init__( self : List[str] ) -> None: """simple docstring""" __magic_name__ = {} # Mapping from char to TrieNode __magic_name__ = False def _lowercase ( self : Any , UpperCamelCase__ : list[str] ) -> None: """simple docstring""" for word in words: self.insert(UpperCamelCase__ ) def _lowercase ( self : Optional[int] , UpperCamelCase__ : str ) -> None: """simple docstring""" __magic_name__ = self for char in word: if char not in curr.nodes: __magic_name__ = TrieNode() __magic_name__ = curr.nodes[char] __magic_name__ = True def _lowercase ( self : Dict , UpperCamelCase__ : str ) -> bool: """simple docstring""" __magic_name__ = self for char in word: if char not in curr.nodes: return False __magic_name__ = curr.nodes[char] return curr.is_leaf def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : str ) -> None: """simple docstring""" def _delete(UpperCamelCase__ : TrieNode , UpperCamelCase__ : str , UpperCamelCase__ : int ) -> bool: if index == len(UpperCamelCase__ ): # If word does not exist if not curr.is_leaf: return False __magic_name__ = False return len(curr.nodes ) == 0 __magic_name__ = word[index] __magic_name__ = curr.nodes.get(UpperCamelCase__ ) # If char not in current trie node if not char_node: return False # Flag to check if node can be deleted __magic_name__ = _delete(UpperCamelCase__ , UpperCamelCase__ , index + 1 ) if delete_curr: del curr.nodes[char] return len(curr.nodes ) == 0 return delete_curr _delete(self , UpperCamelCase__ , 0 ) def a__ ( A_, A_ ): '''simple docstring''' if node.is_leaf: print(A_, end=""" """ ) for key, value in node.nodes.items(): print_words(A_, word + key ) def a__ ( ): '''simple docstring''' __magic_name__ = """banana bananas bandana band apple all beast""".split() __magic_name__ = TrieNode() root.insert_many(A_ ) # print_words(root, "") assert all(root.find(A_ ) for word in words ) assert root.find("""banana""" ) assert not root.find("""bandanas""" ) assert not 
root.find("""apps""" ) assert root.find("""apple""" ) assert root.find("""all""" ) root.delete("""all""" ) assert not root.find("""all""" ) root.delete("""banana""" ) assert not root.find("""banana""" ) assert root.find("""bananas""" ) return True def a__ ( A_, A_ ): '''simple docstring''' print(str(A_ ), """works!""" if passes else """doesn't work :(""" ) def a__ ( ): '''simple docstring''' assert test_trie() def a__ ( ): '''simple docstring''' print_results("""Testing trie functionality""", test_trie() ) if __name__ == "__main__": main()
76
def a__ ( A_ ): '''simple docstring''' return " ".join( """""".join(word[::-1] ) if len(A_ ) > 4 else word for word in sentence.split() ) if __name__ == "__main__": import doctest doctest.testmod() print(reverse_long_words('Hey wollef sroirraw'))
76
1
from __future__ import annotations import math import random from collections.abc import Collection from typing import overload class UpperCAmelCase_ : '''simple docstring''' def __init__( self : List[Any] , UpperCamelCase__ : Collection[float] | None = None ) -> None: """simple docstring""" if components is None: __magic_name__ = [] __magic_name__ = list(UpperCamelCase__ ) def __len__( self : str ) -> int: """simple docstring""" return len(self.__components ) def __str__( self : int ) -> str: """simple docstring""" return "(" + ",".join(map(UpperCamelCase__ , self.__components ) ) + ")" def __add__( self : Union[str, Any] , UpperCamelCase__ : Vector ) -> Vector: """simple docstring""" __magic_name__ = len(self ) if size == len(UpperCamelCase__ ): __magic_name__ = [self.__components[i] + other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )] return Vector(UpperCamelCase__ ) else: raise Exception("""must have the same size""" ) def __sub__( self : Dict , UpperCamelCase__ : Vector ) -> Vector: """simple docstring""" __magic_name__ = len(self ) if size == len(UpperCamelCase__ ): __magic_name__ = [self.__components[i] - other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )] return Vector(UpperCamelCase__ ) else: # error case raise Exception("""must have the same size""" ) @overload def __mul__( self : Any , UpperCamelCase__ : float ) -> Vector: """simple docstring""" ... @overload def __mul__( self : List[str] , UpperCamelCase__ : Vector ) -> float: """simple docstring""" ... 
def __mul__( self : Any , UpperCamelCase__ : float | Vector ) -> float | Vector: """simple docstring""" if isinstance(UpperCamelCase__ , (float, int) ): __magic_name__ = [c * other for c in self.__components] return Vector(UpperCamelCase__ ) elif isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(self ) == len(UpperCamelCase__ ): __magic_name__ = len(self ) __magic_name__ = [self.__components[i] * other.component(UpperCamelCase__ ) for i in range(UpperCamelCase__ )] return sum(UpperCamelCase__ ) else: # error case raise Exception("""invalid operand!""" ) def _lowercase ( self : str ) -> Vector: """simple docstring""" return Vector(self.__components ) def _lowercase ( self : Dict , UpperCamelCase__ : int ) -> float: """simple docstring""" if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and -len(self.__components ) <= i < len(self.__components ): return self.__components[i] else: raise Exception("""index out of range""" ) def _lowercase ( self : Dict , UpperCamelCase__ : int , UpperCamelCase__ : float ) -> None: """simple docstring""" assert -len(self.__components ) <= pos < len(self.__components ) __magic_name__ = value def _lowercase ( self : Any ) -> float: """simple docstring""" if len(self.__components ) == 0: raise Exception("""Vector is empty""" ) __magic_name__ = [c**2 for c in self.__components] return math.sqrt(sum(UpperCamelCase__ ) ) def _lowercase ( self : List[str] , UpperCamelCase__ : Vector , UpperCamelCase__ : bool = False ) -> float: """simple docstring""" __magic_name__ = self * other __magic_name__ = self.euclidean_length() * other.euclidean_length() if deg: return math.degrees(math.acos(num / den ) ) else: return math.acos(num / den ) def a__ ( A_ ): '''simple docstring''' assert isinstance(A_, A_ ) return Vector([0] * dimension ) def a__ ( A_, A_ ): '''simple docstring''' assert isinstance(A_, A_ ) and (isinstance(A_, A_ )) __magic_name__ = [0] * dimension __magic_name__ = 1 return Vector(A_ ) def a__ ( A_, A_, A_ ): '''simple 
docstring''' assert ( isinstance(A_, A_ ) and isinstance(A_, A_ ) and (isinstance(A_, (int, float) )) ) return x * scalar + y def a__ ( A_, A_, A_ ): '''simple docstring''' random.seed(A_ ) __magic_name__ = [random.randint(A_, A_ ) for _ in range(A_ )] return Vector(A_ ) class UpperCAmelCase_ : '''simple docstring''' def __init__( self : Union[str, Any] , UpperCamelCase__ : list[list[float]] , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> None: """simple docstring""" __magic_name__ = matrix __magic_name__ = w __magic_name__ = h def __str__( self : Union[str, Any] ) -> str: """simple docstring""" __magic_name__ = """""" for i in range(self.__height ): ans += "|" for j in range(self.__width ): if j < self.__width - 1: ans += str(self.__matrix[i][j] ) + "," else: ans += str(self.__matrix[i][j] ) + "|\n" return ans def __add__( self : List[Any] , UpperCamelCase__ : Matrix ) -> Matrix: """simple docstring""" if self.__width == other.width() and self.__height == other.height(): __magic_name__ = [] for i in range(self.__height ): __magic_name__ = [ self.__matrix[i][j] + other.component(UpperCamelCase__ , UpperCamelCase__ ) for j in range(self.__width ) ] matrix.append(UpperCamelCase__ ) return Matrix(UpperCamelCase__ , self.__width , self.__height ) else: raise Exception("""matrix must have the same dimension!""" ) def __sub__( self : Union[str, Any] , UpperCamelCase__ : Matrix ) -> Matrix: """simple docstring""" if self.__width == other.width() and self.__height == other.height(): __magic_name__ = [] for i in range(self.__height ): __magic_name__ = [ self.__matrix[i][j] - other.component(UpperCamelCase__ , UpperCamelCase__ ) for j in range(self.__width ) ] matrix.append(UpperCamelCase__ ) return Matrix(UpperCamelCase__ , self.__width , self.__height ) else: raise Exception("""matrices must have the same dimension!""" ) @overload def __mul__( self : Dict , UpperCamelCase__ : float ) -> Matrix: """simple docstring""" ... 
@overload def __mul__( self : Tuple , UpperCamelCase__ : Vector ) -> Vector: """simple docstring""" ... def __mul__( self : Dict , UpperCamelCase__ : float | Vector ) -> Vector | Matrix: """simple docstring""" if isinstance(UpperCamelCase__ , UpperCamelCase__ ): # matrix-vector if len(UpperCamelCase__ ) == self.__width: __magic_name__ = zero_vector(self.__height ) for i in range(self.__height ): __magic_name__ = [ self.__matrix[i][j] * other.component(UpperCamelCase__ ) for j in range(self.__width ) ] ans.change_component(UpperCamelCase__ , sum(UpperCamelCase__ ) ) return ans else: raise Exception( """vector must have the same size as the """ """number of columns of the matrix!""" ) elif isinstance(UpperCamelCase__ , (int, float) ): # matrix-scalar __magic_name__ = [ [self.__matrix[i][j] * other for j in range(self.__width )] for i in range(self.__height ) ] return Matrix(UpperCamelCase__ , self.__width , self.__height ) return None def _lowercase ( self : Tuple ) -> int: """simple docstring""" return self.__height def _lowercase ( self : Any ) -> int: """simple docstring""" return self.__width def _lowercase ( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> float: """simple docstring""" if 0 <= x < self.__height and 0 <= y < self.__width: return self.__matrix[x][y] else: raise Exception("""change_component: indices out of bounds""" ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : int , UpperCamelCase__ : int , UpperCamelCase__ : float ) -> None: """simple docstring""" if 0 <= x < self.__height and 0 <= y < self.__width: __magic_name__ = value else: raise Exception("""change_component: indices out of bounds""" ) def _lowercase ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> float: """simple docstring""" if self.__height != self.__width: raise Exception("""Matrix is not square""" ) __magic_name__ = self.__matrix[:x] + self.__matrix[x + 1 :] for i in range(len(UpperCamelCase__ ) ): __magic_name__ = 
minor[i][:y] + minor[i][y + 1 :] return Matrix(UpperCamelCase__ , self.__width - 1 , self.__height - 1 ).determinant() def _lowercase ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> float: """simple docstring""" if self.__height != self.__width: raise Exception("""Matrix is not square""" ) if 0 <= x < self.__height and 0 <= y < self.__width: return (-1) ** (x + y) * self.minor(UpperCamelCase__ , UpperCamelCase__ ) else: raise Exception("""Indices out of bounds""" ) def _lowercase ( self : str ) -> float: """simple docstring""" if self.__height != self.__width: raise Exception("""Matrix is not square""" ) if self.__height < 1: raise Exception("""Matrix has no element""" ) elif self.__height == 1: return self.__matrix[0][0] elif self.__height == 2: return ( self.__matrix[0][0] * self.__matrix[1][1] - self.__matrix[0][1] * self.__matrix[1][0] ) else: __magic_name__ = [ self.__matrix[0][y] * self.cofactor(0 , UpperCamelCase__ ) for y in range(self.__width ) ] return sum(UpperCamelCase__ ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = [[0] * n for _ in range(A_ )] return Matrix(A_, A_, A_ ) def a__ ( A_, A_, A_, A_ ): '''simple docstring''' random.seed(A_ ) __magic_name__ = [ [random.randint(A_, A_ ) for _ in range(A_ )] for _ in range(A_ ) ] return Matrix(A_, A_, A_ )
76
import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase_ ( _A , unittest.TestCase ): '''simple docstring''' a__ = FunnelTokenizer a__ = FunnelTokenizerFast a__ = True a__ = True def _lowercase ( self : List[Any] ) -> str: """simple docstring""" super().setUp() __magic_name__ = [ """<unk>""", """<cls>""", """<sep>""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] __magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def _lowercase ( self : Dict , **UpperCamelCase__ : Tuple ) -> Union[str, Any]: """simple docstring""" return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self : str , **UpperCamelCase__ : str ) -> List[str]: """simple docstring""" return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self : List[str] , UpperCamelCase__ : str ) -> List[Any]: """simple docstring""" __magic_name__ = """UNwant\u00E9d,running""" __magic_name__ = """unwanted, running""" return input_text, output_text def _lowercase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __magic_name__ = self.tokenizer_class(self.vocab_file ) __magic_name__ = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(UpperCamelCase__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] ) def _lowercase ( self : str ) -> List[Any]: """simple docstring""" 
__magic_name__ = self.get_tokenizers(do_lower_case=UpperCamelCase__ ) for tokenizer in tokenizers: __magic_name__ = tokenizer("""UNwant\u00E9d,running""" ) __magic_name__ = len(inputs["""input_ids"""] ) - 1 self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len ) __magic_name__ = tokenizer("""UNwant\u00E9d,running""" , """UNwant\u00E9d,running""" ) self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len + [1] * sentence_len )
76
1
import fire from utils import calculate_rouge, save_json def a__ ( A_, A_, A_=None, **A_ ): '''simple docstring''' __magic_name__ = [x.strip() for x in open(A_ ).readlines()] __magic_name__ = [x.strip() for x in open(A_ ).readlines()][: len(A_ )] __magic_name__ = calculate_rouge(A_, A_, **A_ ) if save_path is not None: save_json(A_, A_, indent=A_ ) return metrics # these print nicely if __name__ == "__main__": fire.Fire(calculate_rouge_path)
76
from collections import deque from .hash_table import HashTable class UpperCAmelCase_ ( _A ): '''simple docstring''' def __init__( self : int , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Optional[Any] ) -> Optional[Any]: """simple docstring""" super().__init__(*UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Dict: """simple docstring""" __magic_name__ = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(UpperCamelCase__ ) __magic_name__ = self.values[key] def _lowercase ( self : List[str] ) -> int: """simple docstring""" return ( sum(self.charge_factor - len(UpperCamelCase__ ) for slot in self.values ) / self.size_table * self.charge_factor ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=None ) -> str: """simple docstring""" if not ( len(self.values[key] ) == self.charge_factor and self.values.count(UpperCamelCase__ ) == 0 ): return key return super()._collision_resolution(UpperCamelCase__ , UpperCamelCase__ )
76
1
from collections.abc import Callable def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = a __magic_name__ = b if function(A_ ) == 0: # one of the a or b is a root for the function return a elif function(A_ ) == 0: return b elif ( function(A_ ) * function(A_ ) > 0 ): # if none of these are root and they are both positive or negative, # then this algorithm can't find the root raise ValueError("""could not find root in given interval.""" ) else: __magic_name__ = start + (end - start) / 2.0 while abs(start - mid ) > 10**-7: # until precisely equals to 10^-7 if function(A_ ) == 0: return mid elif function(A_ ) * function(A_ ) < 0: __magic_name__ = mid else: __magic_name__ = mid __magic_name__ = start + (end - start) / 2.0 return mid def a__ ( A_ ): '''simple docstring''' return x**3 - 2 * x - 5 if __name__ == "__main__": print(bisection(f, 1, 1000)) import doctest doctest.testmod()
76
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ {"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=A_, AssumeRolePolicyDocument=json.dumps(A_, indent=2 ) ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ { """Effect""": """Allow""", """Action""": [ """sagemaker:*""", """ecr:GetDownloadUrlForLayer""", """ecr:BatchGetImage""", """ecr:BatchCheckLayerAvailability""", """ecr:GetAuthorizationToken""", """cloudwatch:PutMetricData""", """cloudwatch:GetMetricData""", """cloudwatch:GetMetricStatistics""", """cloudwatch:ListMetrics""", """logs:CreateLogGroup""", 
"""logs:CreateLogStream""", """logs:DescribeLogStreams""", """logs:PutLogEvents""", """logs:GetLogEvents""", """s3:CreateBucket""", """s3:ListBucket""", """s3:GetBucketLocation""", """s3:GetObject""", """s3:PutObject""", ], """Resource""": """*""", } ], } # attach policy to role iam_client.put_role_policy( RoleName=A_, PolicyName=f'''{role_name}_policy_permission''', PolicyDocument=json.dumps(A_, indent=2 ), ) except iam_client.exceptions.EntityAlreadyExistsException: print(f'''role {role_name} already exists. Using existing one''' ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) return iam_client.get_role(RoleName=A_ )["Role"]["Arn"] def a__ ( ): '''simple docstring''' __magic_name__ = _ask_options( """How do you want to authorize?""", ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """], A_, ) __magic_name__ = None if credentials_configuration == 0: __magic_name__ = _ask_field("""Enter your AWS Profile name: [default] """, default="""default""" ) __magic_name__ = aws_profile else: print( """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,""" """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" ) __magic_name__ = _ask_field("""AWS Access Key ID: """ ) __magic_name__ = aws_access_key_id __magic_name__ = _ask_field("""AWS Secret Access Key: """ ) __magic_name__ = aws_secret_access_key __magic_name__ = _ask_field("""Enter your AWS Region: [us-east-1]""", default="""us-east-1""" ) __magic_name__ = aws_region __magic_name__ = _ask_options( """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""", ["""Provide IAM Role name""", """Create new IAM role using credentials"""], A_, ) if role_management == 0: __magic_name__ = _ask_field("""Enter your IAM role name: """ ) else: __magic_name__ = """accelerate_sagemaker_execution_role""" print(f'''Accelerate will create an iam role "{iam_role_name}" 
using the provided credentials''' ) _create_iam_role_for_sagemaker(A_ ) __magic_name__ = _ask_field( """Do you want to use custom Docker image? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_custom_docker_image: __magic_name__ = _ask_field("""Enter your Docker image: """, lambda A_ : str(A_ ).lower() ) __magic_name__ = _ask_field( """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_inputs_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_field( """Do you want to enable SageMaker metrics? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_metrics_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_options( """What is the distributed mode?""", ["""No distributed training""", """Data parallelism"""], _convert_sagemaker_distributed_mode, ) __magic_name__ = {} __magic_name__ = _ask_field( """Do you wish to optimize your script with torch dynamo?[yes/NO]:""", _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_dynamo: __magic_name__ = """dynamo_""" __magic_name__ = _ask_options( """Which dynamo backend would you like to use?""", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, ) __magic_name__ = _ask_field( """Do you want to customize the defaults sent to torch.compile? 
[yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_custom_options: __magic_name__ = _ask_options( """Which mode do you want to use?""", A_, lambda A_ : TORCH_DYNAMO_MODES[int(A_ )], default="""default""", ) __magic_name__ = _ask_field( """Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = _ask_field( """Do you want to enable dynamic shape tracing? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = """Which EC2 instance type you want to use for your training?""" if distributed_type != SageMakerDistributedType.NO: __magic_name__ = _ask_options( A_, A_, lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" __magic_name__ = _ask_field(A_, lambda A_ : str(A_ ).lower(), default="""ml.p3.2xlarge""" ) __magic_name__ = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): __magic_name__ = _ask_field( """How many machines do you want use? [1]: """, A_, default=1, ) __magic_name__ = _ask_options( """Do you wish to use FP16 or BF16 (mixed precision)?""", ["""no""", """fp16""", """bf16""", """fp8"""], _convert_mixed_precision, ) if use_dynamo and mixed_precision == "no": print( """Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" ) return SageMakerConfig( image_uri=A_, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=A_, use_cpu=A_, dynamo_config=A_, eca_instance_type=A_, profile=A_, region=A_, iam_role_name=A_, mixed_precision=A_, num_machines=A_, sagemaker_inputs_file=A_, sagemaker_metrics_file=A_, )
76
1
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : int = logging.get_logger(__name__) __lowerCAmelCase : str = { 'asapp/sew-tiny-100k': 'https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json', # See all SEW models at https://huggingface.co/models?filter=sew } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """sew""" def __init__( self : Optional[Any] , UpperCamelCase__ : int=32 , UpperCamelCase__ : List[str]=768 , UpperCamelCase__ : Optional[int]=12 , UpperCamelCase__ : Any=12 , UpperCamelCase__ : Optional[Any]=3072 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : int="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : List[str]=0.0 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Union[str, Any]=1E-5 , UpperCamelCase__ : Optional[int]="group" , UpperCamelCase__ : Optional[Any]="gelu" , UpperCamelCase__ : Dict=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , UpperCamelCase__ : List[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCamelCase__ : Any=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : str=128 , UpperCamelCase__ : List[Any]=16 , UpperCamelCase__ : int=True , UpperCamelCase__ : str=0.05 , UpperCamelCase__ : Any=10 , UpperCamelCase__ : List[Any]=2 , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : Any=10 , UpperCamelCase__ : Optional[int]=0 , UpperCamelCase__ : Union[str, Any]="mean" , UpperCamelCase__ : Union[str, Any]=False , UpperCamelCase__ : Any=False , UpperCamelCase__ : Optional[Any]=256 , UpperCamelCase__ : str=0 , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : Tuple=2 , **UpperCamelCase__ : str , ) -> Dict: """simple docstring""" super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , 
bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ ) __magic_name__ = hidden_size __magic_name__ = feat_extract_norm __magic_name__ = feat_extract_activation __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = conv_bias __magic_name__ = num_conv_pos_embeddings __magic_name__ = num_conv_pos_embedding_groups __magic_name__ = len(self.conv_dim ) __magic_name__ = num_hidden_layers __magic_name__ = intermediate_size __magic_name__ = squeeze_factor __magic_name__ = hidden_act __magic_name__ = num_attention_heads __magic_name__ = hidden_dropout __magic_name__ = attention_dropout __magic_name__ = activation_dropout __magic_name__ = feat_proj_dropout __magic_name__ = final_dropout __magic_name__ = layerdrop __magic_name__ = layer_norm_eps __magic_name__ = initializer_range __magic_name__ = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 __magic_name__ = apply_spec_augment __magic_name__ = mask_time_prob __magic_name__ = mask_time_length __magic_name__ = mask_time_min_masks __magic_name__ = mask_feature_prob __magic_name__ = mask_feature_length __magic_name__ = mask_feature_min_masks # ctc loss __magic_name__ = ctc_loss_reduction __magic_name__ = ctc_zero_infinity # sequence classification __magic_name__ = use_weighted_layer_sum __magic_name__ = classifier_proj_size @property def _lowercase ( self 
: int ) -> Dict: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
76
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


logger = logging.get_logger(__name__)

if is_vision_available():
    import PIL


class UpperCAmelCase_(BaseImageProcessor):
    """CLIP-style image processor.

    Pipeline (each step individually switchable): convert to RGB, resize the
    shortest edge, center-crop, rescale pixel values, normalize with the
    OpenAI CLIP mean/std, and pack the result into a ``BatchFeature``.

    The mangled original declared an undefined base class ``_A`` and gave every
    method the same name ``_lowercase`` while ``preprocess`` called
    ``self.resize``/``self.center_crop``/``self.rescale``/``self.normalize`` —
    those real names are restored here. Duplicate parameter names (a
    SyntaxError) are replaced with distinct, descriptive ones.
    """

    # Kept under its original (mangled) attribute name; lists the keys emitted
    # by `preprocess`.
    a__ = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        """Store per-step defaults; ``size``/``crop_size`` fall back to 224.

        NOTE(review): the `default_to_square` flags passed to `get_size_dict`
        were destroyed by the mangling; False for `size` and True for
        `crop_size` follows the standard CLIP processor — confirm against the
        upstream file.
        """
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals ``size["shortest_edge"]``, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Standard per-channel normalization: ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        """Run the full pipeline on one image or a list of images.

        Every ``None`` argument falls back to the default stored in ``__init__``.
        Returns a ``BatchFeature`` with key ``"pixel_values"``.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
76
1
import argparse

import torch

from transformers import (
    WavaVecaConfig,
    WavaVecaFeatureExtractor,
    WavaVecaForAudioFrameClassification,
    WavaVecaForSequenceClassification,
    WavaVecaForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    """Build a sequence-classification model and copy the s3prl head weights into it.

    The original defined this as ``def a__(A_, A_, A_)`` — duplicate parameter
    names are a SyntaxError — and was unreachable under its mangled name even
    though ``convert_saprl_checkpoint`` calls ``convert_classification``.
    """
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Build an audio-frame-classification (diarization) model from s3prl weights."""
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Build an x-vector (speaker verification) model from s3prl weights.

    NOTE(review): the destination attributes (projector / tdnn[i].kernel /
    feature_extractor / classifier / objective) were erased by the identifier
    mangling; these follow the standard HF s3prl conversion layout — confirm
    against the upstream script.
    """
    model = WavaVecaForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an s3prl checkpoint to a HF model + feature extractor and save both.

    Dispatches on ``config.architectures[0]`` to the matching head converter.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]

    hf_config = WavaVecaConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        # NOTE(review): the boolean arguments were mangled; True/False matches
        # the standard conversion script — confirm.
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    # ``parser`` / ``args`` were undefined names in the mangled original.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
76
import unittest

from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        NystromformerForMaskedLM,
        NystromformerForMultipleChoice,
        NystromformerForQuestionAnswering,
        NystromformerForSequenceClassification,
        NystromformerForTokenClassification,
        NystromformerModel,
    )
    from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


class NystromformerModelTester:
    """Builds tiny random configs/inputs and shape-checks every Nystromformer head.

    Restored from a mangled original in which every ``__init__`` parameter was
    named ``UpperCamelCase__`` (duplicate parameter names — a SyntaxError), all
    methods were named ``_lowercase``, and this class itself was unreachable
    even though ``setUp`` below instantiates ``NystromformerModelTester(self)``.
    Parameter names are recovered from the attribute assignments in the body.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels sized by the tester's hyper-parameters."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return NystromformerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = NystromformerForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = NystromformerForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = NystromformerForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        # Multiple-choice inputs replicate each example once per choice.
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids,
            attention_mask=multiple_choice_input_mask,
            token_type_ids=multiple_choice_token_type_ids,
            labels=choice_labels,
        )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class NystromformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    # NOTE(review): the four class attributes below were all mangled to ``a__``
    # (each shadowing the previous); these are the attribute names
    # ModelTesterMixin/PipelineTesterMixin read — confirm against upstream.
    all_model_classes = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": NystromformerModel,
            "fill-mask": NystromformerForMaskedLM,
            "question-answering": NystromformerForQuestionAnswering,
            "text-classification": NystromformerForSequenceClassification,
            "token-classification": NystromformerForTokenClassification,
            "zero-shot": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_headmasking = False

    def setUp(self):
        self.model_tester = NystromformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=NystromformerConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            # config is the first element of the prepared tuple.
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = NystromformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


@require_torch
class NystromformerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = NystromformerModel.from_pretrained("uw-madison/nystromformer-512")
        input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
        with torch.no_grad():
            output = model(input_ids)[0]

        expected_shape = torch.Size((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_masked_lm_end_to_end(self):
        sentence = "the [MASK] of Belgium is Brussels"
        tokenizer = AutoTokenizer.from_pretrained("uw-madison/nystromformer-512")
        model = NystromformerForMaskedLM.from_pretrained("uw-madison/nystromformer-512")
        encoding = tokenizer(sentence, return_tensors="pt")
        with torch.no_grad():
            token_logits = model(encoding.input_ids).logits
        # Token index 2 is the [MASK] position in this sentence.
        prediction = token_logits[:, 2, :].argmax(-1)[0]
        self.assertEqual(tokenizer.decode(prediction), "capital")
76
1
__lowerCAmelCase : Dict = 'Input must be a string of 8 numbers plus letter' __lowerCAmelCase : Optional[int] = 'TRWAGMYFPDXBNJZSQVHLCKE' def a__ ( A_ ): '''simple docstring''' if not isinstance(A_, A_ ): __magic_name__ = f'''Expected string as input, found {type(A_ ).__name__}''' raise TypeError(A_ ) __magic_name__ = spanish_id.replace("""-""", """""" ).upper() if len(A_ ) != 9: raise ValueError(A_ ) try: __magic_name__ = int(spanish_id_clean[0:8] ) __magic_name__ = spanish_id_clean[8] except ValueError as ex: raise ValueError(A_ ) from ex if letter.isdigit(): raise ValueError(A_ ) return letter == LOOKUP_LETTERS[number % 23] if __name__ == "__main__": import doctest doctest.testmod()
76
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# Canonical pretrained config locations.
CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class UpperCAmelCase_(PretrainedConfig):
    """Configuration for CvT (Convolutional vision Transformer) models.

    Stage-wise hyper-parameters are three-element lists (one entry per stage).
    The mangled original declared an undefined base class ``_A`` (the imported
    ``PretrainedConfig`` is restored) and gave every ``__init__`` parameter the
    same name — a SyntaxError. Parameter names are recovered from the attribute
    assignments in the body.
    """

    # Kept under its original (mangled) attribute name; holds the model type id.
    a__ = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
76
1
import argparse
from collections import defaultdict


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_doc_toc(doc_list):
    """Clean one toctree section: dedupe entries, keep 'Overview' first, sort the rest by title.

    Fixes versus the mangled original: ``defaultdict(int)`` (it was
    ``defaultdict(<the input list>)``, which raises TypeError on the first
    increment), a working sort lambda (the lambda's parameter had been renamed
    away from ``s``), the f-prefix on the 'two overview docs' message, and the
    non-duplicate filter testing ``"local" not in doc`` rather than the
    always-true ``"local" not in counts``.
    """
    counts = defaultdict(int)
    overview_doc = []
    new_doc_list = []
    for doc in doc_list:
        if "local" in doc:
            counts[doc["local"]] += 1
        if doc["title"].lower() == "overview":
            overview_doc.append({"local": doc["local"], "title": doc["title"]})
        else:
            new_doc_list.append(doc)
    doc_list = new_doc_list
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in doc_list if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in doc_list if "local" not in doc or counts[doc["local"]] == 1])
    new_doc = sorted(new_doc, key=lambda s: s["title"].lower())

    # "overview" gets special treatment and is always first
    if len(overview_doc) > 1:
        raise ValueError(f"{doc_list} has two 'overview' docs which is not allowed.")
    overview_doc.extend(new_doc)

    # Sort
    return overview_doc


def check_scheduler_doc(overwrite=False):
    """Verify (or rewrite, with ``overwrite=True``) the Schedulers section of the toctree."""
    import yaml  # local import so clean_doc_toc stays usable without PyYAML

    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the scheduler doc
    scheduler_idx = 0
    while api_doc[scheduler_idx]["title"] != "Schedulers":
        scheduler_idx += 1
    scheduler_doc = api_doc[scheduler_idx]["sections"]

    new_scheduler_doc = clean_doc_toc(scheduler_doc)

    diff = False
    if new_scheduler_doc != scheduler_doc:
        diff = True
        if overwrite:
            api_doc[scheduler_idx]["sections"] = new_scheduler_doc

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


def check_pipeline_doc(overwrite=False):
    """Verify (or rewrite, with ``overwrite=True``) the Pipelines section of the toctree."""
    import yaml  # local import so clean_doc_toc stays usable without PyYAML

    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the pipeline doc
    pipeline_idx = 0
    while api_doc[pipeline_idx]["title"] != "Pipelines":
        pipeline_idx += 1

    diff = False
    pipeline_docs = api_doc[pipeline_idx]["sections"]
    new_pipeline_docs = []

    # sort sub pipeline docs
    for pipeline_doc in pipeline_docs:
        if "section" in pipeline_doc:
            sub_pipeline_doc = pipeline_doc["section"]
            new_sub_pipeline_doc = clean_doc_toc(sub_pipeline_doc)
            if overwrite:
                pipeline_doc["section"] = new_sub_pipeline_doc
        new_pipeline_docs.append(pipeline_doc)

    # sort overall pipeline doc
    new_pipeline_docs = clean_doc_toc(new_pipeline_docs)

    if new_pipeline_docs != pipeline_docs:
        diff = True
        if overwrite:
            api_doc[pipeline_idx]["sections"] = new_pipeline_docs

    if diff:
        if overwrite:
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    # ``parser`` / ``args`` were undefined names in the mangled original.
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()
    check_scheduler_doc(args.fix_and_overwrite)
    check_pipeline_doc(args.fix_and_overwrite)
76
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowerCAmelCase : List[str] = { 'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'], 'tokenization_canine': ['CanineTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[Any] = [ 'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST', 'CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineLayer', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys __lowerCAmelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
1
def a__ ( A_ = 1000 ): '''simple docstring''' return sum(e for e in range(3, A_ ) if e % 3 == 0 or e % 5 == 0 ) if __name__ == "__main__": print(F'''{solution() = }''')
76
import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() __lowerCAmelCase : str = logging.get_logger(__name__) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForSequenceClassification.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""projector.weight"""] __magic_name__ = downstream_dict["""projector.bias"""] __magic_name__ = downstream_dict["""model.post_net.linear.weight"""] __magic_name__ = downstream_dict["""model.post_net.linear.bias"""] return model def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForAudioFrameClassification.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""model.linear.weight"""] __magic_name__ = downstream_dict["""model.linear.bias"""] return model def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForXVector.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""connector.weight"""] __magic_name__ = downstream_dict["""connector.bias"""] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): __magic_name__ = downstream_dict[ f'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] __magic_name__ = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""] __magic_name__ = downstream_dict["""objective.W"""] return model @torch.no_grad() def a__ ( A_, A_, A_, A_ ): '''simple docstring''' __magic_name__ = torch.load(A_, map_location="""cpu""" ) 
__magic_name__ = checkpoint["""Downstream"""] __magic_name__ = WavaVecaConfig.from_pretrained(A_ ) __magic_name__ = WavaVecaFeatureExtractor.from_pretrained( A_, return_attention_mask=A_, do_normalize=A_ ) __magic_name__ = hf_config.architectures[0] if arch.endswith("""ForSequenceClassification""" ): __magic_name__ = convert_classification(A_, A_, A_ ) elif arch.endswith("""ForAudioFrameClassification""" ): __magic_name__ = convert_diarization(A_, A_, A_ ) elif arch.endswith("""ForXVector""" ): __magic_name__ = convert_xvector(A_, A_, A_ ) else: raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' ) if hf_config.use_weighted_layer_sum: __magic_name__ = checkpoint["""Featurizer"""]["""weights"""] hf_feature_extractor.save_pretrained(A_ ) hf_model.save_pretrained(A_ ) if __name__ == "__main__": __lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.' ) parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') __lowerCAmelCase : str = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
76
1
def a__ ( A_ ): '''simple docstring''' if n == 1 or not isinstance(A_, A_ ): return 0 elif n == 2: return 1 else: __magic_name__ = [0, 1] for i in range(2, n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def a__ ( A_ ): '''simple docstring''' __magic_name__ = 0 __magic_name__ = 2 while digits < n: index += 1 __magic_name__ = len(str(fibonacci(A_ ) ) ) return index def a__ ( A_ = 1000 ): '''simple docstring''' return fibonacci_digits_index(A_ ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
76
import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def a__ ( A_, A_ ): '''simple docstring''' assert isinstance(A_, A_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""", [False, True] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ = TextDatasetReader(A_, cache_dir=A_, keep_in_memory=A_ ).read() _check_text_dataset(A_, A_ ) @pytest.mark.parametrize( """features""", [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ], ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = features.copy() if features else default_expected_features __magic_name__ = ( Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ = TextDatasetReader(A_, features=A_, cache_dir=A_ ).read() _check_text_dataset(A_, A_ ) @pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_, split=A_ ).read() _check_text_dataset(A_, A_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""", [str, list] ) def a__ ( A_, A_, A_ ): '''simple docstring''' if issubclass(A_, A_ ): 
__magic_name__ = text_path elif issubclass(A_, A_ ): __magic_name__ = [text_path] __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_ ).read() _check_text_dataset(A_, A_ ) def a__ ( A_, A_, A_=("train",) ): '''simple docstring''' assert isinstance(A_, A_ ) for split in splits: __magic_name__ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""", [False, True] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ = TextDatasetReader({"""train""": text_path}, cache_dir=A_, keep_in_memory=A_ ).read() _check_text_datasetdict(A_, A_ ) @pytest.mark.parametrize( """features""", [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ], ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" __magic_name__ = {"""text""": """string"""} __magic_name__ = features.copy() if features else default_expected_features __magic_name__ = ( Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ = TextDatasetReader({"""train""": text_path}, features=A_, cache_dir=A_ ).read() _check_text_datasetdict(A_, A_ ) @pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] ) def a__ ( A_, A_, A_ ): '''simple docstring''' if split: __magic_name__ = {split: text_path} else: __magic_name__ = """train""" __magic_name__ = {"""train""": 
text_path, """test""": text_path} __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_ ).read() _check_text_datasetdict(A_, A_, splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
76
1
import os import sys __lowerCAmelCase : Optional[Any] = os.path.join(os.path.dirname(__file__), 'src') sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) __lowerCAmelCase : Union[str, Any] = [ 'torch', 'numpy', 'tokenizers', 'filelock', 'requests', 'tqdm', 'regex', 'sentencepiece', 'sacremoses', 'importlib_metadata', 'huggingface_hub', ] @add_start_docstrings(AutoConfig.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoConfig.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoTokenizer.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoTokenizer.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModel.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModel.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForCausalLM.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForMaskedLM.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForSequenceClassification.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForQuestionAnswering.from_pretrained(*A_, **A_ )
76
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = ["""pixel_values"""] def __init__( self : str , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : List[Any] , ) -> None: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = size if size is not None else {"""shortest_edge""": 256} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ ) __magic_name__ = do_resize __magic_name__ = size __magic_name__ = resample __magic_name__ = do_center_crop __magic_name__ = crop_size __magic_name__ = do_rescale __magic_name__ = rescale_factor __magic_name__ = do_normalize __magic_name__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __magic_name__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowercase ( self : Any , UpperCamelCase__ 
: np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __magic_name__ = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__ ) return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : str , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ ) return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Any ) -> np.ndarray: """simple docstring""" return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : List[str] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ) -> np.ndarray: """simple docstring""" return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : 
ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : int , ) -> Dict: """simple docstring""" __magic_name__ = do_resize if do_resize is not None else self.do_resize __magic_name__ = size if size is not None else self.size __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = resample if resample is not None else self.resample __magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__ = crop_size if crop_size is not None else self.crop_size __magic_name__ = get_size_dict(UpperCamelCase__ ) __magic_name__ = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ = image_mean if image_mean is not None else self.image_mean __magic_name__ = image_std if image_std is not None else self.image_std __magic_name__ = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. __magic_name__ = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_resize: __magic_name__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images] if do_center_crop: __magic_name__ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images] if do_rescale: __magic_name__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images] if do_normalize: __magic_name__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images] __magic_name__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images] __magic_name__ = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
76
1
def a__ ( A_, A_ ): '''simple docstring''' while a != 0: __magic_name__ , __magic_name__ = b % a, a return b def a__ ( A_, A_ ): '''simple docstring''' if gcd(A_, A_ ) != 1: __magic_name__ = f'''mod inverse of {a!r} and {m!r} does not exist''' raise ValueError(A_ ) __magic_name__ , __magic_name__ , __magic_name__ = 1, 0, a __magic_name__ , __magic_name__ , __magic_name__ = 0, 1, m while va != 0: __magic_name__ = ua // va __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va return ua % m
76
import math def a__ ( A_, A_ = 0, A_ = 0 ): '''simple docstring''' __magic_name__ = end or len(A_ ) for i in range(A_, A_ ): __magic_name__ = i __magic_name__ = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: __magic_name__ = array[temp_index - 1] temp_index -= 1 __magic_name__ = temp_index_value return array def a__ ( A_, A_, A_ ): # Max Heap '''simple docstring''' __magic_name__ = index __magic_name__ = 2 * index + 1 # Left Node __magic_name__ = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: __magic_name__ = left_index if right_index < heap_size and array[largest] < array[right_index]: __magic_name__ = right_index if largest != index: __magic_name__ , __magic_name__ = array[largest], array[index] heapify(A_, A_, A_ ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = len(A_ ) for i in range(n // 2, -1, -1 ): heapify(A_, A_, A_ ) for i in range(n - 1, 0, -1 ): __magic_name__ , __magic_name__ = array[0], array[i] heapify(A_, 0, A_ ) return array def a__ ( A_, A_, A_, A_ ): '''simple docstring''' if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def a__ ( A_, A_, A_, A_ ): '''simple docstring''' __magic_name__ = low __magic_name__ = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i __magic_name__ , __magic_name__ = array[j], array[i] i += 1 def a__ ( A_ ): '''simple docstring''' if len(A_ ) == 0: return array __magic_name__ = 2 * math.ceil(math.loga(len(A_ ) ) ) __magic_name__ = 16 return intro_sort(A_, 0, len(A_ ), A_, A_ ) def a__ ( A_, A_, A_, A_, A_ ): '''simple docstring''' while end - start > size_threshold: if max_depth == 0: return heap_sort(A_ ) max_depth -= 1 __magic_name__ = median_of_a(A_, A_, 
start + ((end - start) // 2) + 1, end - 1 ) __magic_name__ = partition(A_, A_, A_, A_ ) intro_sort(A_, A_, A_, A_, A_ ) __magic_name__ = p return insertion_sort(A_, A_, A_ ) if __name__ == "__main__": import doctest doctest.testmod() __lowerCAmelCase : str = input('Enter numbers separated by a comma : ').strip() __lowerCAmelCase : List[Any] = [float(item) for item in user_input.split(',')] print(sort(unsorted))
76
1
__lowerCAmelCase : str = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] __lowerCAmelCase : int = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5] __lowerCAmelCase : Tuple = { 0: 'Sunday', 1: 'Monday', 2: 'Tuesday', 3: 'Wednesday', 4: 'Thursday', 5: 'Friday', 6: 'Saturday', } def a__ ( A_, A_, A_ ): '''simple docstring''' assert len(str(A_ ) ) > 2, "year should be in YYYY format" assert 1 <= month <= 12, "month should be between 1 to 12" assert 1 <= day <= 31, "day should be between 1 to 31" # Doomsday algorithm: __magic_name__ = year // 100 __magic_name__ = (5 * (century % 4) + 2) % 7 __magic_name__ = year % 100 __magic_name__ = centurian % 12 __magic_name__ = ( (centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor ) % 7 __magic_name__ = ( DOOMSDAY_NOT_LEAP[month - 1] if (year % 4 != 0) or (centurian == 0 and (year % 400) == 0) else DOOMSDAY_LEAP[month - 1] ) __magic_name__ = (dooms_day + day - day_anchor) % 7 return WEEK_DAY_NAMES[week_day] if __name__ == "__main__": import doctest doctest.testmod()
76
import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase : str = logging.get_logger(__name__) def a__ ( A_ ): '''simple docstring''' __magic_name__ = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError("""Quantized models are not supported.""" ) __magic_name__ = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""", A_ ) if matches: __magic_name__ = float(matches[1] ) __magic_name__ = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". __magic_name__ = 1001 __magic_name__ = """imagenet-1k-id2label.json""" __magic_name__ = """huggingface/label-files""" __magic_name__ = json.load(open(hf_hub_download(A_, A_, repo_type="""dataset""" ), """r""" ) ) __magic_name__ = {int(A_ ) + 1: v for k, v in idalabel.items()} __magic_name__ = """background""" __magic_name__ = idalabel __magic_name__ = {v: k for k, v in idalabel.items()} return config def a__ ( ): '''simple docstring''' __magic_name__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" __magic_name__ = Image.open(requests.get(A_, stream=A_ ).raw ) return im @torch.no_grad() def a__ ( A_, A_, A_, A_=False ): '''simple docstring''' __magic_name__ = get_mobilenet_va_config(A_ ) # Load 🤗 model __magic_name__ = MobileNetVaForImageClassification(A_ ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(A_, A_, A_ ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor __magic_name__ = MobileNetVaImageProcessor( crop_size={"""width""": config.image_size, """height""": config.image_size}, size={"""shortest_edge""": 
config.image_size + 32}, ) __magic_name__ = image_processor(images=prepare_img(), return_tensors="""pt""" ) __magic_name__ = model(**A_ ) __magic_name__ = outputs.logits assert logits.shape == (1, 1001) if model_name == "mobilenet_v1_1.0_224": __magic_name__ = torch.tensor([-4.1739, -1.1233, 3.1205] ) elif model_name == "mobilenet_v1_0.75_192": __magic_name__ = torch.tensor([-3.9440, -2.3141, -0.3333] ) else: __magic_name__ = None if expected_logits is not None: assert torch.allclose(logits[0, :3], A_, atol=1e-4 ) Path(A_ ).mkdir(exist_ok=A_ ) print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(A_ ) if push_to_hub: print("""Pushing to the hub...""" ) __magic_name__ = """google/""" + model_name image_processor.push_to_hub(A_ ) model.push_to_hub(A_ ) if __name__ == "__main__": __lowerCAmelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='mobilenet_v1_1.0_224', type=str, help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.', ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __lowerCAmelCase : str = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
76
1
from __future__ import annotations def a__ ( A_ = 4 ): '''simple docstring''' __magic_name__ = abs(A_ ) or 4 return [[1 + x + y * row_size for x in range(A_ )] for y in range(A_ )] def a__ ( A_ ): '''simple docstring''' return reverse_row(transpose(A_ ) ) # OR.. transpose(reverse_column(matrix)) def a__ ( A_ ): '''simple docstring''' return reverse_row(reverse_column(A_ ) ) # OR.. reverse_column(reverse_row(matrix)) def a__ ( A_ ): '''simple docstring''' return reverse_column(transpose(A_ ) ) # OR.. transpose(reverse_row(matrix)) def a__ ( A_ ): '''simple docstring''' __magic_name__ = [list(A_ ) for x in zip(*A_ )] return matrix def a__ ( A_ ): '''simple docstring''' __magic_name__ = matrix[::-1] return matrix def a__ ( A_ ): '''simple docstring''' __magic_name__ = [x[::-1] for x in matrix] return matrix def a__ ( A_ ): '''simple docstring''' for i in matrix: print(*A_ ) if __name__ == "__main__": __lowerCAmelCase : str = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 90 counterclockwise:\n') print_matrix(rotate_aa(matrix)) __lowerCAmelCase : List[str] = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 180:\n') print_matrix(rotate_aaa(matrix)) __lowerCAmelCase : Optional[Any] = make_matrix() print('\norigin:\n') print_matrix(matrix) print('\nrotate 270 counterclockwise:\n') print_matrix(rotate_aaa(matrix))
76
import collections import importlib.util import os import re from pathlib import Path __lowerCAmelCase : int = 'src/transformers' # Matches is_xxx_available() __lowerCAmelCase : Optional[int] = re.compile(R'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} __lowerCAmelCase : Dict = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __lowerCAmelCase : int = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available __lowerCAmelCase : Optional[Any] = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") __lowerCAmelCase : Optional[Any] = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __lowerCAmelCase : Dict = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", __lowerCAmelCase : List[str] = re.compile('^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], __lowerCAmelCase : Optional[int] = re.compile('^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo __lowerCAmelCase : List[str] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: __lowerCAmelCase : int = re.compile(R'^\s*try:') # Catches a line with else: __lowerCAmelCase : Tuple = re.compile(R'^\s*else:') def a__ ( A_ ): '''simple docstring''' if _re_test_backend.search(A_ ) is None: return None __magic_name__ = [b[0] for b in _re_backend.findall(A_ )] backends.sort() return "_and_".join(A_ ) def a__ ( A_ ): '''simple docstring''' with open(A_, """r""", encoding="""utf-8""", newline="""\n""" ) as f: __magic_name__ = f.readlines() __magic_name__ = 0 while line_index < len(A_ ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index 
+= 1 # If this is a traditional init, just return. if line_index >= len(A_ ): return None # First grab the objects without a specific backend in _import_structure __magic_name__ = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: __magic_name__ = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(A_ ): __magic_name__ = _re_one_line_import_struct.search(A_ ).groups()[0] __magic_name__ = re.findall("""\[([^\]]+)\]""", A_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue __magic_name__ = _re_import_struct_key_value.search(A_ ) if single_line_import_search is not None: __magic_name__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(A_ ) > 0] objects.extend(A_ ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 __magic_name__ = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. 
__magic_name__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __magic_name__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __magic_name__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): __magic_name__ = lines[line_index] if _re_import_struct_add_one.search(A_ ) is not None: objects.append(_re_import_struct_add_one.search(A_ ).groups()[0] ) elif _re_import_struct_add_many.search(A_ ) is not None: __magic_name__ = _re_import_struct_add_many.search(A_ ).groups()[0].split(""", """ ) __magic_name__ = [obj[1:-1] for obj in imports if len(A_ ) > 0] objects.extend(A_ ) elif _re_between_brackets.search(A_ ) is not None: __magic_name__ = _re_between_brackets.search(A_ ).groups()[0].split(""", """ ) __magic_name__ = [obj[1:-1] for obj in imports if len(A_ ) > 0] objects.extend(A_ ) elif _re_quote_object.search(A_ ) is not None: objects.append(_re_quote_object.search(A_ ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 __magic_name__ = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend __magic_name__ = [] while ( line_index < len(A_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): __magic_name__ = lines[line_index] __magic_name__ = _re_import.search(A_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 __magic_name__ = {"""none""": objects} # Let's 
continue with backend-specific objects while line_index < len(A_ ): # If the line is an if is_backend_available, we grab all objects associated. __magic_name__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __magic_name__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __magic_name__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): __magic_name__ = lines[line_index] __magic_name__ = _re_import.search(A_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 __magic_name__ = objects else: line_index += 1 return import_dict_objects, type_hint_objects def a__ ( A_, A_ ): '''simple docstring''' def find_duplicates(A_ ): return [k for k, v in collections.Counter(A_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] __magic_name__ = [] for key in import_dict_objects.keys(): __magic_name__ = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) __magic_name__ = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): __magic_name__ = """base imports""" if key == """none""" else f'''{key} backend''' errors.append(f'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f''' {a} 
in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def a__ ( ): '''simple docstring''' __magic_name__ = [] for root, _, files in os.walk(A_ ): if "__init__.py" in files: __magic_name__ = os.path.join(A_, """__init__.py""" ) __magic_name__ = parse_init(A_ ) if objects is not None: __magic_name__ = analyze_results(*A_ ) if len(A_ ) > 0: __magic_name__ = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("""\n""".join(A_ ) ) if len(A_ ) > 0: raise ValueError("""\n\n""".join(A_ ) ) def a__ ( ): '''simple docstring''' __magic_name__ = [] for path, directories, files in os.walk(A_ ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(A_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(A_ ) / folder).glob("""*.py""" ) ) ) == 0: continue __magic_name__ = str((Path(A_ ) / folder).relative_to(A_ ) ) __magic_name__ = short_path.replace(os.path.sep, """.""" ) submodules.append(A_ ) for fname in files: if fname == "__init__.py": continue __magic_name__ = str((Path(A_ ) / fname).relative_to(A_ ) ) __magic_name__ = short_path.replace(""".py""", """""" ).replace(os.path.sep, """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(A_ ) return submodules __lowerCAmelCase : Dict = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', ] def a__ ( ): '''simple docstring''' __magic_name__ = importlib.util.spec_from_file_location( """transformers""", os.path.join(A_, """__init__.py""" ), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) __magic_name__ = spec.loader.load_module() __magic_name__ = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(A_ ) > 0: 
__magic_name__ = """\n""".join(f'''- {module}''' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registered in the main init of Transformers:\n""" f'''{list_of_modules}\n''' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
76
1
import argparse import os import re import torch from flax.traverse_util import flatten_dict from tax import checkpoints from transformers import ( AutoTokenizer, PixaStructConfig, PixaStructForConditionalGeneration, PixaStructImageProcessor, PixaStructProcessor, PixaStructTextConfig, PixaStructVisionConfig, ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = checkpoints.load_tax_checkpoint(A_ ) __magic_name__ = flatten_dict(A_ ) return flax_params def a__ ( A_ ): '''simple docstring''' __magic_name__ = {} __magic_name__ = { """token_embedder""": """embeddings""", """encoder_norm""": """layernorm""", """kernel""": """weight""", """.out""": """.output""", """scale""": """weight""", """embedders_0.pos_embedding""": """row_embedder.weight""", """embedders_1.pos_embedding""": """column_embedder.weight""", } __magic_name__ = { """query""": """attention.query""", """key""": """attention.key""", """value""": """attention.value""", """output.dense""": """output""", """encoder_decoder_attention.o""": """encoder_decoder_attention.attention.o""", """pre_self_attention_layer_norm""": """self_attention.layer_norm""", """pre_cross_attention_layer_norm""": """encoder_decoder_attention.layer_norm""", """mlp.""": """mlp.DenseReluDense.""", """pre_mlp_layer_norm""": """mlp.layer_norm""", """self_attention.o""": """self_attention.attention.o""", """decoder.embeddings.embedding""": """decoder.embed_tokens.weight""", """decoder.relpos_bias.rel_embedding""": """decoder.layer.0.self_attention.attention.relative_attention_bias.weight""", """decoder.decoder_norm.weight""": """decoder.final_layer_norm.weight""", """decoder.logits_dense.weight""": """decoder.lm_head.weight""", } for key in flax_dict.keys(): if "target" in key: # remove the first prefix from the key __magic_name__ = """.""".join(key[1:] ) # rename the key for old, new in CONVERSION_MAPPING.items(): __magic_name__ = new_key.replace(A_, A_ ) if "decoder" in new_key: for old, new in DECODER_CONVERSION_MAPPING.items(): 
__magic_name__ = new_key.replace(A_, A_ ) if "layers" in new_key and "decoder" not in new_key: # use regex to replace the layer number __magic_name__ = re.sub(R"""layers_(\d+)""", R"""layer.\1""", A_ ) __magic_name__ = new_key.replace("""encoder""", """encoder.encoder""" ) elif "layers" in new_key and "decoder" in new_key: # use regex to replace the layer number __magic_name__ = re.sub(R"""layers_(\d+)""", R"""layer.\1""", A_ ) __magic_name__ = flax_dict[key] __magic_name__ = {} # convert converted_dict into torch format for key in converted_dict.keys(): if ("embed_tokens" not in key) and ("embedder" not in key): __magic_name__ = torch.from_numpy(converted_dict[key].T ) else: __magic_name__ = torch.from_numpy(converted_dict[key] ) return converted_torch_dict def a__ ( A_, A_, A_=False, A_=False ): '''simple docstring''' __magic_name__ = get_flax_param(A_ ) if not use_large: __magic_name__ = PixaStructVisionConfig() __magic_name__ = PixaStructTextConfig() else: __magic_name__ = PixaStructVisionConfig( hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18 ) __magic_name__ = PixaStructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18 ) __magic_name__ = PixaStructConfig( vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=A_ ) __magic_name__ = PixaStructForConditionalGeneration(A_ ) __magic_name__ = rename_and_convert_flax_params(A_ ) model.load_state_dict(A_ ) __magic_name__ = AutoTokenizer.from_pretrained("""ybelkada/test-pix2struct-tokenizer""" ) __magic_name__ = PixaStructImageProcessor() __magic_name__ = PixaStructProcessor(image_processor=A_, tokenizer=A_ ) if use_large: __magic_name__ = 4096 __magic_name__ = True # mkdir if needed os.makedirs(A_, exist_ok=A_ ) model.save_pretrained(A_ ) processor.save_pretrained(A_ ) print("""Model saved in {}""".format(A_ ) ) if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('--t5x_checkpoint_path', 
default=None, type=str, help='Path to the original T5x checkpoint.') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--use_large', action='store_true', help='Use large model.') parser.add_argument('--is_vqa', action='store_true', help='Use large model.') __lowerCAmelCase : Optional[int] = parser.parse_args() convert_pixastruct_original_pytorch_checkpoint_to_hf( args.tax_checkpoint_path, args.pytorch_dump_folder_path, args.use_large )
76
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) __lowerCAmelCase : List[Any] = { 'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """sew-d""" def __init__( self : List[str] , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : Optional[int]=768 , UpperCamelCase__ : Tuple=12 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : int=3072 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : Any=256 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : str=("p2c", "c2p") , UpperCamelCase__ : List[Any]="layer_norm" , UpperCamelCase__ : int="gelu_python" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : int=0.0 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[int]=1E-7 , UpperCamelCase__ : List[Any]=1E-5 , UpperCamelCase__ : List[str]="group" , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : Tuple=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , UpperCamelCase__ : str=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCamelCase__ : Optional[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Optional[int]=128 , UpperCamelCase__ : Tuple=16 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Dict=0.05 , UpperCamelCase__ : str=10 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Dict=10 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : List[Any]="mean" , UpperCamelCase__ : int=False , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : 
Optional[int]=256 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=2 , **UpperCamelCase__ : str , ) -> Dict: """simple docstring""" super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ ) __magic_name__ = hidden_size __magic_name__ = feat_extract_norm __magic_name__ = feat_extract_activation __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = conv_bias __magic_name__ = num_conv_pos_embeddings __magic_name__ = num_conv_pos_embedding_groups __magic_name__ = len(self.conv_dim ) __magic_name__ = num_hidden_layers __magic_name__ = intermediate_size __magic_name__ = squeeze_factor __magic_name__ = max_position_embeddings __magic_name__ = position_buckets __magic_name__ = share_att_key __magic_name__ = relative_attention __magic_name__ = norm_rel_ebd __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = hidden_act __magic_name__ = num_attention_heads __magic_name__ = hidden_dropout __magic_name__ = attention_dropout __magic_name__ = activation_dropout __magic_name__ = feat_proj_dropout __magic_name__ = final_dropout __magic_name__ = layer_norm_eps __magic_name__ = feature_layer_norm_eps __magic_name__ = initializer_range __magic_name__ = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: 
https://arxiv.org/abs/1904.08779 __magic_name__ = apply_spec_augment __magic_name__ = mask_time_prob __magic_name__ = mask_time_length __magic_name__ = mask_time_min_masks __magic_name__ = mask_feature_prob __magic_name__ = mask_feature_length __magic_name__ = mask_feature_min_masks # ctc loss __magic_name__ = ctc_loss_reduction __magic_name__ = ctc_zero_infinity # sequence classification __magic_name__ = use_weighted_layer_sum __magic_name__ = classifier_proj_size @property def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
76
1
import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def a__ ( A_ ): '''simple docstring''' return sum(param.float().sum() if """encoder.embeddings""" not in key else 0 for key, param in state_dict.items() ) def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue __magic_name__ = key.replace("""heads.cmd.mim_head.cls.predictions""", """mmm_image_head""" ) __magic_name__ = key.replace("""heads.cmd.mlm_head.cls.predictions""", """mmm_text_head""" ) __magic_name__ = key.replace("""heads.cmd.itm_head.cls""", """itm_head""" ) __magic_name__ = key.replace("""heads.cmd.itm_head.pooler""", """itm_head.pooler""" ) __magic_name__ = key.replace("""heads.cmd.clip_head.logit_scale""", """flava.logit_scale""" ) __magic_name__ = key.replace("""heads.fairseq_mlm.cls.predictions""", """mlm_head""" ) __magic_name__ = key.replace("""heads.imagenet.mim_head.cls.predictions""", """mim_head""" ) __magic_name__ = key.replace("""mm_text_projection""", """flava.text_to_mm_projection""" ) __magic_name__ = key.replace("""mm_image_projection""", """flava.image_to_mm_projection""" ) __magic_name__ = key.replace("""image_encoder.module""", """flava.image_model""" ) __magic_name__ = key.replace("""text_encoder.module""", """flava.text_model""" ) __magic_name__ = key.replace("""mm_encoder.module.encoder.cls_token""", """flava.multimodal_model.cls_token""" ) __magic_name__ = key.replace("""mm_encoder.module""", """flava.multimodal_model""" ) __magic_name__ = key.replace("""text_projection""", """flava.text_projection""" ) __magic_name__ = key.replace("""image_projection""", """flava.image_projection""" ) __magic_name__ = value.float() for key, value in codebook_state_dict.items(): __magic_name__ = value return upgrade @torch.no_grad() 
def a__ ( A_, A_, A_, A_=None ): '''simple docstring''' if config_path is not None: __magic_name__ = FlavaConfig.from_pretrained(A_ ) else: __magic_name__ = FlavaConfig() __magic_name__ = FlavaForPreTraining(A_ ).eval() __magic_name__ = convert_dalle_checkpoint(A_, A_, save_checkpoint=A_ ) if os.path.exists(A_ ): __magic_name__ = torch.load(A_, map_location="""cpu""" ) else: __magic_name__ = torch.hub.load_state_dict_from_url(A_, map_location="""cpu""" ) __magic_name__ = upgrade_state_dict(A_, A_ ) hf_model.load_state_dict(A_ ) __magic_name__ = hf_model.state_dict() __magic_name__ = count_parameters(A_ ) __magic_name__ = count_parameters(A_ ) + count_parameters(A_ ) assert torch.allclose(A_, A_, atol=1e-3 ) hf_model.save_pretrained(A_ ) if __name__ == "__main__": __lowerCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to flava checkpoint') parser.add_argument('--codebook_path', default=None, type=str, help='Path to flava codebook checkpoint') parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert') __lowerCAmelCase : List[Any] = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
76
import math import random def a__ ( A_, A_ = False ): '''simple docstring''' if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value __lowerCAmelCase : Union[str, Any] = 0.02 def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = float(2 * (random.randint(1, 100 )) - 1 ) for _ in range(A_ ): # Forward propagation __magic_name__ = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? __magic_name__ = (expected / 100) - layer_a # Error delta __magic_name__ = layer_1_error * sigmoid_function(A_, A_ ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 100 if __name__ == "__main__": import doctest doctest.testmod() __lowerCAmelCase : List[Any] = int(input('Expected value: ')) __lowerCAmelCase : Tuple = int(input('Number of propagations: ')) print(forward_propagation(expected, number_propagations))
76
1
import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger __lowerCAmelCase : Dict = get_logger(__name__) __lowerCAmelCase : Union[str, Any] = Path(__file__).parent / 'model_card_template.md' __lowerCAmelCase : Optional[int] = uuida().hex __lowerCAmelCase : str = os.getenv('HF_HUB_OFFLINE', '').upper() in ENV_VARS_TRUE_VALUES __lowerCAmelCase : Dict = os.getenv('DISABLE_TELEMETRY', '').upper() in ENV_VARS_TRUE_VALUES __lowerCAmelCase : Any = HUGGINGFACE_CO_RESOLVE_ENDPOINT + '/api/telemetry/' def a__ ( A_ = None ): '''simple docstring''' __magic_name__ = f'''diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}''' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'''; torch/{_torch_version}''' if is_flax_available(): ua += f'''; jax/{_jax_version}''' ua += f'''; flax/{_flax_version}''' if is_onnx_available(): ua += f'''; onnxruntime/{_onnxruntime_version}''' # CI will set this value to True if os.environ.get("""DIFFUSERS_IS_CI""", """""" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(A_, A_ ): ua += "; " + "; ".join(f'''{k}/{v}''' for k, v in user_agent.items() ) elif 
isinstance(A_, A_ ): ua += "; " + user_agent return ua def a__ ( A_, A_ = None, A_ = None ): '''simple docstring''' if token is None: __magic_name__ = HfFolder.get_token() if organization is None: __magic_name__ = whoami(A_ )["""name"""] return f'''{username}/{model_id}''' else: return f'''{organization}/{model_id}''' def a__ ( A_, A_ ): '''simple docstring''' if not is_jinja_available(): raise ValueError( """Modelcard rendering is based on Jinja templates.""" """ Please make sure to have `jinja` installed before using `create_model_card`.""" """ To install it, please run `pip install Jinja2`.""" ) if hasattr(A_, """local_rank""" ) and args.local_rank not in [-1, 0]: return __magic_name__ = args.hub_token if hasattr(A_, """hub_token""" ) else None __magic_name__ = get_full_repo_name(A_, token=A_ ) __magic_name__ = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="""en""", license="""apache-2.0""", library_name="""diffusers""", tags=[], datasets=args.dataset_name, metrics=[], ), template_path=A_, model_name=A_, repo_name=A_, dataset_name=args.dataset_name if hasattr(A_, """dataset_name""" ) else None, learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size, gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(A_, """gradient_accumulation_steps""" ) else None ), adam_betaa=args.adam_betaa if hasattr(A_, """adam_beta1""" ) else None, adam_betaa=args.adam_betaa if hasattr(A_, """adam_beta2""" ) else None, adam_weight_decay=args.adam_weight_decay if hasattr(A_, """adam_weight_decay""" ) else None, adam_epsilon=args.adam_epsilon if hasattr(A_, """adam_epsilon""" ) else None, lr_scheduler=args.lr_scheduler if hasattr(A_, """lr_scheduler""" ) else None, lr_warmup_steps=args.lr_warmup_steps if hasattr(A_, """lr_warmup_steps""" ) else None, ema_inv_gamma=args.ema_inv_gamma if hasattr(A_, """ema_inv_gamma""" ) else None, 
ema_power=args.ema_power if hasattr(A_, """ema_power""" ) else None, ema_max_decay=args.ema_max_decay if hasattr(A_, """ema_max_decay""" ) else None, mixed_precision=args.mixed_precision, ) __magic_name__ = os.path.join(args.output_dir, """README.md""" ) model_card.save(A_ ) def a__ ( A_, A_ = None ): '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash __magic_name__ = str(Path(A_ ).as_posix() ) __magic_name__ = re.search(R"""snapshots/([^/]+)/""", A_ ) if search is None: return None __magic_name__ = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(A_ ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. __lowerCAmelCase : List[str] = os.path.expanduser( os.getenv('HF_HOME', os.path.join(os.getenv('XDG_CACHE_HOME', '~/.cache'), 'huggingface')) ) __lowerCAmelCase : str = os.path.join(hf_cache_home, 'diffusers') def a__ ( A_ = None, A_ = None ): '''simple docstring''' if new_cache_dir is None: __magic_name__ = DIFFUSERS_CACHE if old_cache_dir is None: __magic_name__ = old_diffusers_cache __magic_name__ = Path(A_ ).expanduser() __magic_name__ = Path(A_ ).expanduser() for old_blob_path in old_cache_dir.glob("""**/blobs/*""" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): __magic_name__ = new_cache_dir / old_blob_path.relative_to(A_ ) new_blob_path.parent.mkdir(parents=A_, exist_ok=A_ ) os.replace(A_, A_ ) try: os.symlink(A_, A_ ) except OSError: logger.warning( """Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded.""" ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). 
__lowerCAmelCase : Optional[int] = os.path.join(DIFFUSERS_CACHE, 'version_diffusers_cache.txt') if not os.path.isfile(cache_version_file): __lowerCAmelCase : Optional[int] = 0 else: with open(cache_version_file) as f: try: __lowerCAmelCase : Optional[int] = int(f.read()) except ValueError: __lowerCAmelCase : Optional[int] = 0 if cache_version < 1: __lowerCAmelCase : Dict = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( 'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your ' 'existing cached models. This is a one-time operation, you can interrupt it or run it ' 'later by calling `diffusers.utils.hub_utils.move_cache()`.' ) try: move_cache() except Exception as e: __lowerCAmelCase : Union[str, Any] = '\n'.join(traceback.format_tb(e.__traceback__)) logger.error( F'''There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease ''' 'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole ' 'message and we will do our best to help.' ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, 'w') as f: f.write('1') except Exception: logger.warning( F'''There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure ''' 'the directory exists and can be written to.' 
) def a__ ( A_, A_ = None ): '''simple docstring''' if variant is not None: __magic_name__ = weights_name.split(""".""" ) __magic_name__ = splits[:-1] + [variant] + splits[-1:] __magic_name__ = """.""".join(A_ ) return weights_name def a__ ( A_, *, A_, A_, A_, A_, A_, A_, A_, A_, A_, A_, A_=None, ): '''simple docstring''' __magic_name__ = str(A_ ) if os.path.isfile(A_ ): return pretrained_model_name_or_path elif os.path.isdir(A_ ): if os.path.isfile(os.path.join(A_, A_ ) ): # Load from a PyTorch checkpoint __magic_name__ = os.path.join(A_, A_ ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(A_, A_, A_ ) ): __magic_name__ = os.path.join(A_, A_, A_ ) return model_file else: raise EnvironmentError( f'''Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.''' ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(A_ ).base_version ) >= version.parse("""0.20.0""" ) ): try: __magic_name__ = hf_hub_download( A_, filename=_add_variant(A_, A_ ), cache_dir=A_, force_download=A_, proxies=A_, resume_download=A_, local_files_only=A_, use_auth_token=A_, user_agent=A_, subfolder=A_, revision=revision or commit_hash, ) warnings.warn( f'''Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.''', A_, ) return model_file except: # noqa: E722 warnings.warn( f'''You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. 
However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(A_, A_ )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(A_, A_ )}\' so that the correct variant file can be added.''', A_, ) try: # 2. Load model file as usual __magic_name__ = hf_hub_download( A_, filename=A_, cache_dir=A_, force_download=A_, proxies=A_, resume_download=A_, local_files_only=A_, use_auth_token=A_, user_agent=A_, subfolder=A_, revision=revision or commit_hash, ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ''' """listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a """ """token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """ """login`.""" ) except RevisionNotFoundError: raise EnvironmentError( f'''{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ''' """this model name. 
Check the model page at """ f'''\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.''' ) except EntryNotFoundError: raise EnvironmentError( f'''{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.''' ) except HTTPError as err: raise EnvironmentError( f'''There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}''' ) except ValueError: raise EnvironmentError( f'''We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it''' f''' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a''' f''' directory containing a file named {weights_name} or''' """ \nCheckout your internet connection or see how to run the library in""" """ offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'.""" ) except EnvironmentError: raise EnvironmentError( f'''Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ''' """'https://huggingface.co/models', make sure you don't have a local directory with the same name. """ f'''Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ''' f'''containing a file named {weights_name}''' )
76
import os import sys __lowerCAmelCase : Optional[Any] = os.path.join(os.path.dirname(__file__), 'src') sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) __lowerCAmelCase : Union[str, Any] = [ 'torch', 'numpy', 'tokenizers', 'filelock', 'requests', 'tqdm', 'regex', 'sentencepiece', 'sacremoses', 'importlib_metadata', 'huggingface_hub', ] @add_start_docstrings(AutoConfig.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoConfig.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoTokenizer.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoTokenizer.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModel.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModel.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForCausalLM.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForMaskedLM.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForSequenceClassification.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForQuestionAnswering.from_pretrained(*A_, **A_ )
76
1
def a__ ( A_, A_ ): '''simple docstring''' if digit_amount > 0: return round(number - int(A_ ), A_ ) return number - int(A_ ) if __name__ == "__main__": print(decimal_isolate(1.53, 0)) print(decimal_isolate(35.345, 1)) print(decimal_isolate(35.345, 2)) print(decimal_isolate(35.345, 3)) print(decimal_isolate(-14.789, 3)) print(decimal_isolate(0, 2)) print(decimal_isolate(-14.123, 1)) print(decimal_isolate(-14.123, 2)) print(decimal_isolate(-14.123, 3))
76
from typing import Dict from .base import GenericTensor, Pipeline class UpperCAmelCase_ ( _A ): '''simple docstring''' def _lowercase ( self : List[Any] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Any=None , **UpperCamelCase__ : Dict ) -> str: """simple docstring""" if tokenize_kwargs is None: __magic_name__ = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( """truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""" ) __magic_name__ = truncation __magic_name__ = tokenize_kwargs __magic_name__ = {} if return_tensors is not None: __magic_name__ = return_tensors return preprocess_params, {}, postprocess_params def _lowercase ( self : int , UpperCamelCase__ : int , **UpperCamelCase__ : int ) -> Dict[str, GenericTensor]: """simple docstring""" __magic_name__ = self.framework __magic_name__ = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ) return model_inputs def _lowercase ( self : str , UpperCamelCase__ : Dict ) -> str: """simple docstring""" __magic_name__ = self.model(**UpperCamelCase__ ) return model_outputs def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str]=False ) -> List[str]: """simple docstring""" if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : List[str] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[Any] ) -> Dict: """simple docstring""" return super().__call__(*UpperCamelCase__ , **UpperCamelCase__ )
76
1
from dataclasses import dataclass from typing import Optional, Tuple import torch from torch import nn from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel from transformers.utils import ModelOutput @dataclass class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = None a__ = None a__ = None a__ = None class UpperCAmelCase_ ( _A ): '''simple docstring''' def __init__( self : Optional[int] , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Union[str, Any]=512 , UpperCamelCase__ : Dict="cls" , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Optional[int]=True , **UpperCamelCase__ : int , ) -> Optional[Any]: """simple docstring""" super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ ) __magic_name__ = project_dim __magic_name__ = pooler_fn __magic_name__ = learn_encoder __magic_name__ = use_attention_mask class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = [R"""pooler""", R"""logit_scale"""] a__ = [R"""position_ids""", R"""predictions.decoder.bias"""] a__ = """roberta""" a__ = RobertaSeriesConfig def __init__( self : str , UpperCamelCase__ : List[str] ) -> List[str]: """simple docstring""" super().__init__(UpperCamelCase__ ) __magic_name__ = XLMRobertaModel(UpperCamelCase__ ) __magic_name__ = nn.Linear(config.hidden_size , config.project_dim ) __magic_name__ = getattr(UpperCamelCase__ , """has_pre_transformation""" , UpperCamelCase__ ) if self.has_pre_transformation: __magic_name__ = nn.Linear(config.hidden_size , config.project_dim ) __magic_name__ = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps ) self.post_init() def _lowercase ( self : str , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : 
Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[torch.Tensor] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[bool] = None , ) -> str: """simple docstring""" __magic_name__ = return_dict if return_dict is not None else self.config.use_return_dict __magic_name__ = self.base_model( input_ids=UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , position_ids=UpperCamelCase__ , head_mask=UpperCamelCase__ , inputs_embeds=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , encoder_attention_mask=UpperCamelCase__ , output_attentions=UpperCamelCase__ , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=UpperCamelCase__ , ) if self.has_pre_transformation: __magic_name__ = outputs["""hidden_states"""][-2] __magic_name__ = self.pre_LN(UpperCamelCase__ ) __magic_name__ = self.transformation_pre(UpperCamelCase__ ) return TransformationModelOutput( projection_state=UpperCamelCase__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , ) else: __magic_name__ = self.transformation(outputs.last_hidden_state ) return TransformationModelOutput( projection_state=UpperCamelCase__ , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
76
import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel __lowerCAmelCase : str = { 'gwf-440k': { 'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt', 'sample_rate': 48000, 'sample_size': 65536, }, 'jmann-small-190k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt', 'sample_rate': 48000, 'sample_size': 65536, }, 'jmann-large-580k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt', 'sample_rate': 48000, 'sample_size': 131072, }, 'maestro-uncond-150k': { 'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, 'unlocked-uncond-250k': { 'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, 'honk-140k': { 'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, } def a__ ( A_, A_ ): '''simple docstring''' return torch.atana(A_, A_ ) / math.pi * 2 def a__ ( A_ ): '''simple docstring''' __magic_name__ = torch.sin(t * math.pi / 2 ) ** 2 __magic_name__ = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(A_, A_ ) class UpperCAmelCase_ ( _A ): '''simple docstring''' pass class UpperCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : Tuple , UpperCamelCase__ : str ) -> Optional[Any]: """simple docstring""" super().__init__() __magic_name__ = DiffusionAttnUnetaD(UpperCamelCase__ , n_attn_layers=4 ) __magic_name__ = deepcopy(self.diffusion ) __magic_name__ = torch.quasirandom.SobolEngine(1 , scramble=UpperCamelCase__ ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = MODELS_MAP[model_name]["""url"""] os.system(f'''wget {url} ./''' ) return f'''./{model_name}.ckpt''' __lowerCAmelCase : Optional[int] = { 
'1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', } __lowerCAmelCase : Optional[Any] = { '8': 'resnets.0', '9': 'attentions.0', '10': 'resnets.1', '11': 'attentions.1', '12': 'resnets.2', '13': 'attentions.2', } __lowerCAmelCase : Union[str, Any] = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', '8': 'resnets.3', '9': 'attentions.3', '10': 'resnets.4', '11': 'attentions.4', '12': 'resnets.5', '13': 'attentions.5', } __lowerCAmelCase : int = { '0': 'resnets.0', '1': 'resnets.1', '2': 'resnets.2', '4': 'resnets.0', '5': 'resnets.1', '6': 'resnets.2', } __lowerCAmelCase : List[str] = { 'skip': 'conv_skip', 'main.0': 'conv_1', 'main.1': 'group_norm_1', 'main.3': 'conv_2', 'main.4': 'group_norm_2', } __lowerCAmelCase : int = { 'norm': 'group_norm', 'qkv_proj': ['query', 'key', 'value'], 'out_proj': ['proj_attn'], } def a__ ( A_ ): '''simple docstring''' if name.startswith("""skip""" ): return name.replace("""skip""", RES_CONV_MAP["""skip"""] ) # name has to be of format main.{digit} if not name.startswith("""main.""" ): raise ValueError(f'''ResConvBlock error with {name}''' ) return name.replace(name[:6], RES_CONV_MAP[name[:6]] ) def a__ ( A_ ): '''simple docstring''' for key, value in ATTN_MAP.items(): if name.startswith(A_ ) and not isinstance(A_, A_ ): return name.replace(A_, A_ ) elif name.startswith(A_ ): return [name.replace(A_, A_ ) for v in value] raise ValueError(f'''Attn error with {name}''' ) def a__ ( A_, A_=13 ): '''simple docstring''' __magic_name__ = input_string if string.split(""".""" )[0] == "timestep_embed": return string.replace("""timestep_embed""", """time_proj""" ) __magic_name__ = 0 if string.startswith("""net.3.""" ): depth += 1 __magic_name__ = string[6:] elif string.startswith("""net.""" ): __magic_name__ = string[4:] while string.startswith("""main.7.""" ): depth += 1 __magic_name__ = string[7:] if 
string.startswith("""main.""" ): __magic_name__ = string[5:] # mid block if string[:2].isdigit(): __magic_name__ = string[:2] __magic_name__ = string[2:] else: __magic_name__ = string[0] __magic_name__ = string[1:] if depth == max_depth: __magic_name__ = MID_NUM_TO_LAYER[layer_num] __magic_name__ = """mid_block""" elif depth > 0 and int(A_ ) < 7: __magic_name__ = DOWN_NUM_TO_LAYER[layer_num] __magic_name__ = f'''down_blocks.{depth}''' elif depth > 0 and int(A_ ) > 7: __magic_name__ = UP_NUM_TO_LAYER[layer_num] __magic_name__ = f'''up_blocks.{max_depth - depth - 1}''' elif depth == 0: __magic_name__ = DEPTH_0_TO_LAYER[layer_num] __magic_name__ = f'''up_blocks.{max_depth - 1}''' if int(A_ ) > 3 else """down_blocks.0""" if not string_left.startswith(""".""" ): raise ValueError(f'''Naming error with {input_string} and string_left: {string_left}.''' ) __magic_name__ = string_left[1:] if "resnets" in new_layer: __magic_name__ = convert_resconv_naming(A_ ) elif "attentions" in new_layer: __magic_name__ = convert_attn_naming(A_ ) __magic_name__ = new_string_left if not isinstance(A_, A_ ): __magic_name__ = prefix + """.""" + new_layer + """.""" + string_left else: __magic_name__ = [prefix + """.""" + new_layer + """.""" + s for s in string_left] return new_string def a__ ( A_ ): '''simple docstring''' __magic_name__ = {} for k, v in state_dict.items(): if k.endswith("""kernel""" ): # up- and downsample layers, don't have trainable weights continue __magic_name__ = rename(A_ ) # check if we need to transform from Conv => Linear for attention if isinstance(A_, A_ ): __magic_name__ = transform_conv_attns(A_, A_, A_ ) else: __magic_name__ = v return new_state_dict def a__ ( A_, A_, A_ ): '''simple docstring''' if len(A_ ) == 1: if len(v.shape ) == 3: # weight __magic_name__ = v[:, :, 0] else: # bias __magic_name__ = v else: # qkv matrices __magic_name__ = v.shape[0] __magic_name__ = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: __magic_name__ = v[i * 
single_shape : (i + 1) * single_shape, :, 0] else: __magic_name__ = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def a__ ( A_ ): '''simple docstring''' __magic_name__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) __magic_name__ = args.model_path.split("""/""" )[-1].split(""".""" )[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), f'''Make sure to provide one of the official model names {MODELS_MAP.keys()}''' __magic_name__ = download(A_ ) __magic_name__ = MODELS_MAP[model_name]["""sample_rate"""] __magic_name__ = MODELS_MAP[model_name]["""sample_size"""] __magic_name__ = Object() __magic_name__ = sample_size __magic_name__ = sample_rate __magic_name__ = 0 __magic_name__ = UNetaDModel(sample_size=A_, sample_rate=A_ ) __magic_name__ = diffusers_model.state_dict() __magic_name__ = DiffusionUncond(A_ ) orig_model.load_state_dict(torch.load(args.model_path, map_location=A_ )["""state_dict"""] ) __magic_name__ = orig_model.diffusion_ema.eval() __magic_name__ = orig_model.state_dict() __magic_name__ = rename_orig_weights(A_ ) __magic_name__ = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) __magic_name__ = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(A_ ) == 0, f'''Problem with {renamed_minus_diffusers}''' assert all(k.endswith("""kernel""" ) for k in list(A_ ) ), f'''Problem with {diffusers_minus_renamed}''' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), f'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. 
{value.shape}''' if key == "time_proj.weight": __magic_name__ = value.squeeze() __magic_name__ = value diffusers_model.load_state_dict(A_ ) __magic_name__ = 100 __magic_name__ = 33 __magic_name__ = IPNDMScheduler(num_train_timesteps=A_ ) __magic_name__ = torch.manual_seed(A_ ) __magic_name__ = torch.randn([1, 2, config.sample_size], generator=A_ ).to(A_ ) __magic_name__ = torch.linspace(1, 0, steps + 1, device=A_ )[:-1] __magic_name__ = get_crash_schedule(A_ ) __magic_name__ = DanceDiffusionPipeline(unet=A_, scheduler=A_ ) __magic_name__ = torch.manual_seed(33 ) __magic_name__ = pipe(num_inference_steps=A_, generator=A_ ).audios __magic_name__ = sampling.iplms_sample(A_, A_, A_, {} ) __magic_name__ = generated.clamp(-1, 1 ) __magic_name__ = (generated - audio).abs().sum() __magic_name__ = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("""Diff sum""", A_ ) print("""Diff max""", A_ ) assert diff_max < 1e-3, f'''Diff max: {diff_max} is too much :-/''' print(f'''Conversion for {model_name} successful!''' ) if __name__ == "__main__": __lowerCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.') parser.add_argument( '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.' ) parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.') __lowerCAmelCase : Union[str, Any] = parser.parse_args() main(args)
76
1
import numpy as np import torch import tqdm from ...models.unet_ad import UNetaDModel from ...pipelines import DiffusionPipeline from ...utils import randn_tensor from ...utils.dummy_pt_objects import DDPMScheduler class UpperCAmelCase_ ( _A ): '''simple docstring''' def __init__( self : Union[str, Any] , UpperCamelCase__ : UNetaDModel , UpperCamelCase__ : UNetaDModel , UpperCamelCase__ : DDPMScheduler , UpperCamelCase__ : List[Any] , ) -> Tuple: """simple docstring""" super().__init__() __magic_name__ = value_function __magic_name__ = unet __magic_name__ = scheduler __magic_name__ = env __magic_name__ = env.get_dataset() __magic_name__ = {} for key in self.data.keys(): try: __magic_name__ = self.data[key].mean() except: # noqa: E722 pass __magic_name__ = {} for key in self.data.keys(): try: __magic_name__ = self.data[key].std() except: # noqa: E722 pass __magic_name__ = env.observation_space.shape[0] __magic_name__ = env.action_space.shape[0] def _lowercase ( self : List[str] , UpperCamelCase__ : Any , UpperCamelCase__ : Dict ) -> List[Any]: """simple docstring""" return (x_in - self.means[key]) / self.stds[key] def _lowercase ( self : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple ) -> Tuple: """simple docstring""" return x_in * self.stds[key] + self.means[key] def _lowercase ( self : str , UpperCamelCase__ : List[str] ) -> List[Any]: """simple docstring""" if type(UpperCamelCase__ ) is dict: return {k: self.to_torch(UpperCamelCase__ ) for k, v in x_in.items()} elif torch.is_tensor(UpperCamelCase__ ): return x_in.to(self.unet.device ) return torch.tensor(UpperCamelCase__ , device=self.unet.device ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : List[Any] ) -> Tuple: """simple docstring""" for key, val in cond.items(): __magic_name__ = val.clone() return x_in def _lowercase ( self : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , 
UpperCamelCase__ : Any ) -> Dict: """simple docstring""" __magic_name__ = x.shape[0] __magic_name__ = None for i in tqdm.tqdm(self.scheduler.timesteps ): # create batch of timesteps to pass into model __magic_name__ = torch.full((batch_size,) , UpperCamelCase__ , device=self.unet.device , dtype=torch.long ) for _ in range(UpperCamelCase__ ): with torch.enable_grad(): x.requires_grad_() # permute to match dimension for pre-trained models __magic_name__ = self.value_function(x.permute(0 , 2 , 1 ) , UpperCamelCase__ ).sample __magic_name__ = torch.autograd.grad([y.sum()] , [x] )[0] __magic_name__ = self.scheduler._get_variance(UpperCamelCase__ ) __magic_name__ = torch.exp(0.5 * posterior_variance ) __magic_name__ = model_std * grad __magic_name__ = 0 __magic_name__ = x.detach() __magic_name__ = x + scale * grad __magic_name__ = self.reset_xa(UpperCamelCase__ , UpperCamelCase__ , self.action_dim ) __magic_name__ = self.unet(x.permute(0 , 2 , 1 ) , UpperCamelCase__ ).sample.permute(0 , 2 , 1 ) # TODO: verify deprecation of this kwarg __magic_name__ = self.scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , predict_epsilon=UpperCamelCase__ )["""prev_sample"""] # apply conditions to the trajectory (set the initial state) __magic_name__ = self.reset_xa(UpperCamelCase__ , UpperCamelCase__ , self.action_dim ) __magic_name__ = self.to_torch(UpperCamelCase__ ) return x, y def __call__( self : Any , UpperCamelCase__ : int , UpperCamelCase__ : Any=64 , UpperCamelCase__ : Optional[int]=32 , UpperCamelCase__ : int=2 , UpperCamelCase__ : Optional[int]=0.1 ) -> Any: """simple docstring""" __magic_name__ = self.normalize(UpperCamelCase__ , """observations""" ) __magic_name__ = obs[None].repeat(UpperCamelCase__ , axis=0 ) __magic_name__ = {0: self.to_torch(UpperCamelCase__ )} __magic_name__ = (batch_size, planning_horizon, self.state_dim + self.action_dim) # generate initial noise and apply our conditions (to make the trajectories start at current state) 
__magic_name__ = randn_tensor(UpperCamelCase__ , device=self.unet.device ) __magic_name__ = self.reset_xa(UpperCamelCase__ , UpperCamelCase__ , self.action_dim ) __magic_name__ = self.to_torch(UpperCamelCase__ ) # run the diffusion process __magic_name__ , __magic_name__ = self.run_diffusion(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) # sort output trajectories by value __magic_name__ = y.argsort(0 , descending=UpperCamelCase__ ).squeeze() __magic_name__ = x[sorted_idx] __magic_name__ = sorted_values[:, :, : self.action_dim] __magic_name__ = actions.detach().cpu().numpy() __magic_name__ = self.de_normalize(UpperCamelCase__ , key="""actions""" ) # select the action with the highest value if y is not None: __magic_name__ = 0 else: # if we didn't run value guiding, select a random action __magic_name__ = np.random.randint(0 , UpperCamelCase__ ) __magic_name__ = denorm_actions[selected_index, 0] return denorm_actions
76
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Tuple = logging.get_logger(__name__) __lowerCAmelCase : Tuple = { 'SCUT-DLVCLab/lilt-roberta-en-base': ( 'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json' ), } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """lilt""" def __init__( self : Dict , UpperCamelCase__ : List[str]=3_0522 , UpperCamelCase__ : Optional[Any]=768 , UpperCamelCase__ : Dict=12 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : Dict=3072 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Union[str, Any]=512 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : Optional[int]=0 , UpperCamelCase__ : str="absolute" , UpperCamelCase__ : Any=None , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Tuple=1024 , **UpperCamelCase__ : Optional[int] , ) -> Dict: """simple docstring""" super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ ) __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = hidden_act __magic_name__ = intermediate_size __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = initializer_range __magic_name__ = layer_norm_eps __magic_name__ = position_embedding_type __magic_name__ = classifier_dropout __magic_name__ = channel_shrink_ratio __magic_name__ = max_ad_position_embeddings
76
1
def a__ ( A_ ): '''simple docstring''' if not isinstance(A_, A_ ) or number < 0: raise ValueError("""Input must be a non-negative integer""" ) __magic_name__ = 0 while number: # This way we arrive at next set bit (next 1) instead of looping # through each bit and checking for 1s hence the # loop won't run 32 times it will only run the number of `1` times number &= number - 1 count += 1 return count if __name__ == "__main__": import doctest doctest.testmod()
76
import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class UpperCAmelCase_ : '''simple docstring''' a__ = None def _lowercase ( self : Optional[int] ) -> str: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) __magic_name__ = json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __magic_name__ = os.path.join(UpperCamelCase__ , """feat_extract.json""" ) feat_extract_first.to_json_file(UpperCamelCase__ ) __magic_name__ = self.feature_extraction_class.from_json_file(UpperCamelCase__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def _lowercase ( self : str ) -> str: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __magic_name__ = feat_extract_first.save_pretrained(UpperCamelCase__ )[0] check_json_file_has_correct_format(UpperCamelCase__ ) __magic_name__ = self.feature_extraction_class.from_pretrained(UpperCamelCase__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def _lowercase ( self : Optional[int] ) -> Tuple: """simple docstring""" __magic_name__ = self.feature_extraction_class() self.assertIsNotNone(UpperCamelCase__ )
76
1
from ...processing_utils import ProcessorMixin class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """WhisperFeatureExtractor""" a__ = """WhisperTokenizer""" def __init__( self : Any , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] ) -> List[Any]: """simple docstring""" super().__init__(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = self.feature_extractor __magic_name__ = False def _lowercase ( self : Dict , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Any=True ) -> Tuple: """simple docstring""" return self.tokenizer.get_decoder_prompt_ids(task=UpperCamelCase__ , language=UpperCamelCase__ , no_timestamps=UpperCamelCase__ ) def __call__( self : str , *UpperCamelCase__ : Dict , **UpperCamelCase__ : List[str] ) -> Dict: """simple docstring""" if self._in_target_context_manager: return self.current_processor(*UpperCamelCase__ , **UpperCamelCase__ ) __magic_name__ = kwargs.pop("""audio""" , UpperCamelCase__ ) __magic_name__ = kwargs.pop("""sampling_rate""" , UpperCamelCase__ ) __magic_name__ = kwargs.pop("""text""" , UpperCamelCase__ ) if len(UpperCamelCase__ ) > 0: __magic_name__ = args[0] __magic_name__ = args[1:] if audio is None and text is None: raise ValueError("""You need to specify either an `audio` or `text` input to process.""" ) if audio is not None: __magic_name__ = self.feature_extractor(UpperCamelCase__ , *UpperCamelCase__ , sampling_rate=UpperCamelCase__ , **UpperCamelCase__ ) if text is not None: __magic_name__ = self.tokenizer(UpperCamelCase__ , **UpperCamelCase__ ) if text is None: return inputs elif audio is None: return encodings else: __magic_name__ = encodings["""input_ids"""] return inputs def _lowercase ( self : Optional[int] , *UpperCamelCase__ : str , **UpperCamelCase__ : Tuple ) -> List[Any]: """simple docstring""" return self.tokenizer.batch_decode(*UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : int , *UpperCamelCase__ : Dict , **UpperCamelCase__ : str ) -> 
Optional[int]: """simple docstring""" return self.tokenizer.decode(*UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Dict , UpperCamelCase__ : str , UpperCamelCase__ : List[str]="np" ) -> List[str]: """simple docstring""" return self.tokenizer.get_prompt_ids(UpperCamelCase__ , return_tensors=UpperCamelCase__ )
76
from ..utils import DummyObject, requires_backends class UpperCAmelCase_ ( metaclass=_A ): '''simple docstring''' a__ = ["""note_seq"""] def __init__( self : Any , *UpperCamelCase__ : str , **UpperCamelCase__ : List[Any] ) -> Optional[int]: """simple docstring""" requires_backends(self , ["""note_seq"""] ) @classmethod def _lowercase ( cls : str , *UpperCamelCase__ : Dict , **UpperCamelCase__ : Tuple ) -> Dict: """simple docstring""" requires_backends(cls , ["""note_seq"""] ) @classmethod def _lowercase ( cls : List[str] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : Tuple ) -> int: """simple docstring""" requires_backends(cls , ["""note_seq"""] )
76
1
import os def a__ ( ): '''simple docstring''' __magic_name__ = os.path.join(os.path.dirname(A_ ), """num.txt""" ) with open(A_ ) as file_hand: return str(sum(int(A_ ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
76
def a__ ( A_ ): '''simple docstring''' return " ".join( """""".join(word[::-1] ) if len(A_ ) > 4 else word for word in sentence.split() ) if __name__ == "__main__": import doctest doctest.testmod() print(reverse_long_words('Hey wollef sroirraw'))
76
1
import qiskit def a__ ( A_ = 2 ): '''simple docstring''' __magic_name__ = qubits # Using Aer's simulator __magic_name__ = qiskit.Aer.get_backend("""aer_simulator""" ) # Creating a Quantum Circuit acting on the q register __magic_name__ = qiskit.QuantumCircuit(A_, A_ ) # Adding a H gate on qubit 0 (now q0 in superposition) circuit.h(0 ) for i in range(1, A_ ): # Adding CX (CNOT) gate circuit.cx(i - 1, A_ ) # Mapping the quantum measurement to the classical bits circuit.measure(list(range(A_ ) ), list(range(A_ ) ) ) # Now measuring any one qubit would affect other qubits to collapse # their super position and have same state as the measured one. # Executing the circuit on the simulator __magic_name__ = qiskit.execute(A_, A_, shots=1000 ) return job.result().get_counts(A_ ) if __name__ == "__main__": print(F'''Total count for various states are: {quantum_entanglement(3)}''')
76
import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase_ ( _A , unittest.TestCase ): '''simple docstring''' a__ = FunnelTokenizer a__ = FunnelTokenizerFast a__ = True a__ = True def _lowercase ( self : List[Any] ) -> str: """simple docstring""" super().setUp() __magic_name__ = [ """<unk>""", """<cls>""", """<sep>""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] __magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def _lowercase ( self : Dict , **UpperCamelCase__ : Tuple ) -> Union[str, Any]: """simple docstring""" return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self : str , **UpperCamelCase__ : str ) -> List[str]: """simple docstring""" return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self : List[str] , UpperCamelCase__ : str ) -> List[Any]: """simple docstring""" __magic_name__ = """UNwant\u00E9d,running""" __magic_name__ = """unwanted, running""" return input_text, output_text def _lowercase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __magic_name__ = self.tokenizer_class(self.vocab_file ) __magic_name__ = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(UpperCamelCase__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] ) def _lowercase ( self : str ) -> List[Any]: """simple docstring""" 
__magic_name__ = self.get_tokenizers(do_lower_case=UpperCamelCase__ ) for tokenizer in tokenizers: __magic_name__ = tokenizer("""UNwant\u00E9d,running""" ) __magic_name__ = len(inputs["""input_ids"""] ) - 1 self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len ) __magic_name__ = tokenizer("""UNwant\u00E9d,running""" , """UNwant\u00E9d,running""" ) self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len + [1] * sentence_len )
76
1
import warnings from ...utils import logging from .image_processing_layoutlmva import LayoutLMvaImageProcessor __lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) class UpperCAmelCase_ ( _A ): '''simple docstring''' def __init__( self : Tuple , *UpperCamelCase__ : Dict , **UpperCamelCase__ : List[str] ) -> None: """simple docstring""" warnings.warn( """The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers.""" """ Please use LayoutLMv2ImageProcessor instead.""" , UpperCamelCase__ , ) super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
76
from collections import deque from .hash_table import HashTable class UpperCAmelCase_ ( _A ): '''simple docstring''' def __init__( self : int , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Optional[Any] ) -> Optional[Any]: """simple docstring""" super().__init__(*UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Dict: """simple docstring""" __magic_name__ = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(UpperCamelCase__ ) __magic_name__ = self.values[key] def _lowercase ( self : List[str] ) -> int: """simple docstring""" return ( sum(self.charge_factor - len(UpperCamelCase__ ) for slot in self.values ) / self.size_table * self.charge_factor ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=None ) -> str: """simple docstring""" if not ( len(self.values[key] ) == self.charge_factor and self.values.count(UpperCamelCase__ ) == 0 ): return key return super()._collision_resolution(UpperCamelCase__ , UpperCamelCase__ )
76
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available __lowerCAmelCase : Optional[Any] = { 'configuration_clipseg': [ 'CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CLIPSegConfig', 'CLIPSegTextConfig', 'CLIPSegVisionConfig', ], 'processing_clipseg': ['CLIPSegProcessor'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : int = [ 'CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST', 'CLIPSegModel', 'CLIPSegPreTrainedModel', 'CLIPSegTextModel', 'CLIPSegVisionModel', 'CLIPSegForImageSegmentation', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys __lowerCAmelCase : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ {"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=A_, AssumeRolePolicyDocument=json.dumps(A_, indent=2 ) ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ { """Effect""": """Allow""", """Action""": [ """sagemaker:*""", """ecr:GetDownloadUrlForLayer""", """ecr:BatchGetImage""", """ecr:BatchCheckLayerAvailability""", """ecr:GetAuthorizationToken""", """cloudwatch:PutMetricData""", """cloudwatch:GetMetricData""", """cloudwatch:GetMetricStatistics""", """cloudwatch:ListMetrics""", """logs:CreateLogGroup""", 
"""logs:CreateLogStream""", """logs:DescribeLogStreams""", """logs:PutLogEvents""", """logs:GetLogEvents""", """s3:CreateBucket""", """s3:ListBucket""", """s3:GetBucketLocation""", """s3:GetObject""", """s3:PutObject""", ], """Resource""": """*""", } ], } # attach policy to role iam_client.put_role_policy( RoleName=A_, PolicyName=f'''{role_name}_policy_permission''', PolicyDocument=json.dumps(A_, indent=2 ), ) except iam_client.exceptions.EntityAlreadyExistsException: print(f'''role {role_name} already exists. Using existing one''' ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) return iam_client.get_role(RoleName=A_ )["Role"]["Arn"] def a__ ( ): '''simple docstring''' __magic_name__ = _ask_options( """How do you want to authorize?""", ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """], A_, ) __magic_name__ = None if credentials_configuration == 0: __magic_name__ = _ask_field("""Enter your AWS Profile name: [default] """, default="""default""" ) __magic_name__ = aws_profile else: print( """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,""" """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" ) __magic_name__ = _ask_field("""AWS Access Key ID: """ ) __magic_name__ = aws_access_key_id __magic_name__ = _ask_field("""AWS Secret Access Key: """ ) __magic_name__ = aws_secret_access_key __magic_name__ = _ask_field("""Enter your AWS Region: [us-east-1]""", default="""us-east-1""" ) __magic_name__ = aws_region __magic_name__ = _ask_options( """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""", ["""Provide IAM Role name""", """Create new IAM role using credentials"""], A_, ) if role_management == 0: __magic_name__ = _ask_field("""Enter your IAM role name: """ ) else: __magic_name__ = """accelerate_sagemaker_execution_role""" print(f'''Accelerate will create an iam role "{iam_role_name}" 
using the provided credentials''' ) _create_iam_role_for_sagemaker(A_ ) __magic_name__ = _ask_field( """Do you want to use custom Docker image? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_custom_docker_image: __magic_name__ = _ask_field("""Enter your Docker image: """, lambda A_ : str(A_ ).lower() ) __magic_name__ = _ask_field( """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_inputs_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_field( """Do you want to enable SageMaker metrics? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_metrics_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_options( """What is the distributed mode?""", ["""No distributed training""", """Data parallelism"""], _convert_sagemaker_distributed_mode, ) __magic_name__ = {} __magic_name__ = _ask_field( """Do you wish to optimize your script with torch dynamo?[yes/NO]:""", _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_dynamo: __magic_name__ = """dynamo_""" __magic_name__ = _ask_options( """Which dynamo backend would you like to use?""", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, ) __magic_name__ = _ask_field( """Do you want to customize the defaults sent to torch.compile? 
[yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_custom_options: __magic_name__ = _ask_options( """Which mode do you want to use?""", A_, lambda A_ : TORCH_DYNAMO_MODES[int(A_ )], default="""default""", ) __magic_name__ = _ask_field( """Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = _ask_field( """Do you want to enable dynamic shape tracing? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = """Which EC2 instance type you want to use for your training?""" if distributed_type != SageMakerDistributedType.NO: __magic_name__ = _ask_options( A_, A_, lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" __magic_name__ = _ask_field(A_, lambda A_ : str(A_ ).lower(), default="""ml.p3.2xlarge""" ) __magic_name__ = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): __magic_name__ = _ask_field( """How many machines do you want use? [1]: """, A_, default=1, ) __magic_name__ = _ask_options( """Do you wish to use FP16 or BF16 (mixed precision)?""", ["""no""", """fp16""", """bf16""", """fp8"""], _convert_mixed_precision, ) if use_dynamo and mixed_precision == "no": print( """Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" ) return SageMakerConfig( image_uri=A_, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=A_, use_cpu=A_, dynamo_config=A_, eca_instance_type=A_, profile=A_, region=A_, iam_role_name=A_, mixed_precision=A_, num_machines=A_, sagemaker_inputs_file=A_, sagemaker_metrics_file=A_, )
76
1
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : int = logging.get_logger(__name__) __lowerCAmelCase : List[Any] = { 'BAAI/AltCLIP': 'https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json', # See all AltCLIP models at https://huggingface.co/models?filter=altclip } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """altclip_text_model""" def __init__( self : Tuple , UpperCamelCase__ : Union[str, Any]=25_0002 , UpperCamelCase__ : Optional[int]=1024 , UpperCamelCase__ : str=24 , UpperCamelCase__ : Dict=16 , UpperCamelCase__ : List[Any]=4096 , UpperCamelCase__ : List[str]="gelu" , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Tuple=514 , UpperCamelCase__ : Tuple=1 , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : int=1E-05 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : Any=0 , UpperCamelCase__ : Any=2 , UpperCamelCase__ : Optional[Any]="absolute" , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[Any]=768 , **UpperCamelCase__ : List[str] , ) -> List[Any]: """simple docstring""" super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ ) __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = hidden_act __magic_name__ = intermediate_size __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = initializer_range __magic_name__ = initializer_factor __magic_name__ = layer_norm_eps __magic_name__ = position_embedding_type __magic_name__ = use_cache __magic_name__ = project_dim class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """altclip_vision_model""" def __init__( self : 
Optional[int] , UpperCamelCase__ : Optional[Any]=768 , UpperCamelCase__ : Union[str, Any]=3072 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : Optional[int]=12 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : Optional[Any]=3 , UpperCamelCase__ : Optional[int]=224 , UpperCamelCase__ : int=32 , UpperCamelCase__ : Union[str, Any]="quick_gelu" , UpperCamelCase__ : Tuple=1E-5 , UpperCamelCase__ : Tuple=0.0 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Union[str, Any]=1.0 , **UpperCamelCase__ : List[Any] , ) -> Tuple: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = hidden_size __magic_name__ = intermediate_size __magic_name__ = projection_dim __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = num_channels __magic_name__ = patch_size __magic_name__ = image_size __magic_name__ = initializer_range __magic_name__ = initializer_factor __magic_name__ = attention_dropout __magic_name__ = layer_norm_eps __magic_name__ = hidden_act @classmethod def _lowercase ( cls : List[str] , UpperCamelCase__ : Union[str, os.PathLike] , **UpperCamelCase__ : List[Any] ) -> "PretrainedConfig": """simple docstring""" cls._set_token_in_kwargs(UpperCamelCase__ ) __magic_name__ , __magic_name__ = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ ) # get the vision config dict if we are loading from AltCLIPConfig if config_dict.get("""model_type""" ) == "altclip": __magic_name__ = config_dict["""vision_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' F'''{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ ) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """altclip""" a__ = True def __init__( self : int , UpperCamelCase__ : Dict=None , UpperCamelCase__ : Tuple=None , UpperCamelCase__ : int=768 , UpperCamelCase__ : int=2.6592 , **UpperCamelCase__ : int ) -> List[Any]: """simple docstring""" __magic_name__ = kwargs.pop("""text_config_dict""" , UpperCamelCase__ ) __magic_name__ = kwargs.pop("""vision_config_dict""" , UpperCamelCase__ ) super().__init__(**UpperCamelCase__ ) # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`. if text_config_dict is not None: if text_config is None: __magic_name__ = {} # This is the complete result when using `text_config_dict`. __magic_name__ = AltCLIPTextConfig(**UpperCamelCase__ ).to_dict() # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different. for key, value in _text_config_dict.items(): if key in text_config and value != text_config[key] and key not in ["transformers_version"]: # If specified in `text_config_dict` if key in text_config_dict: __magic_name__ = ( F'''`{key}` is found in both `text_config_dict` and `text_config` but with different values. ''' F'''The value `text_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: __magic_name__ = ( F'''`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. 
The ''' F'''value `text_config["{key}"]` will be overriden.''' ) logger.warning(UpperCamelCase__ ) # Update all values in `text_config` with the ones in `_text_config_dict`. text_config.update(_text_config_dict ) if vision_config_dict is not None: if vision_config is None: __magic_name__ = {} # This is the complete result when using `vision_config_dict`. __magic_name__ = AltCLIPVisionConfig(**UpperCamelCase__ ).to_dict() # convert keys to string instead of integer if "id2label" in _vision_config_dict: __magic_name__ = { str(UpperCamelCase__ ): value for key, value in _vision_config_dict["""id2label"""].items() } # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different. for key, value in _vision_config_dict.items(): if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]: # If specified in `vision_config_dict` if key in vision_config_dict: __magic_name__ = ( F'''`{key}` is found in both `vision_config_dict` and `vision_config` but with different ''' F'''values. The value `vision_config_dict["{key}"]` will be used instead.''' ) # If inferred from default argument values (just to be super careful) else: __magic_name__ = ( F'''`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. ''' F'''The value `vision_config["{key}"]` will be overriden.''' ) logger.warning(UpperCamelCase__ ) # Update all values in `vision_config` with the ones in `_vision_config_dict`. vision_config.update(_vision_config_dict ) if text_config is None: __magic_name__ = {} logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""" ) if vision_config is None: __magic_name__ = {} logger.info("""`vision_config` is `None`. 
initializing the `AltCLIPVisionConfig` with default values.""" ) __magic_name__ = AltCLIPTextConfig(**UpperCamelCase__ ) __magic_name__ = AltCLIPVisionConfig(**UpperCamelCase__ ) __magic_name__ = projection_dim __magic_name__ = logit_scale_init_value __magic_name__ = 1.0 @classmethod def _lowercase ( cls : str , UpperCamelCase__ : AltCLIPTextConfig , UpperCamelCase__ : AltCLIPVisionConfig , **UpperCamelCase__ : Any ) -> Tuple: """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase__ ) def _lowercase ( self : List[str] ) -> str: """simple docstring""" __magic_name__ = copy.deepcopy(self.__dict__ ) __magic_name__ = self.text_config.to_dict() __magic_name__ = self.vision_config.to_dict() __magic_name__ = self.__class__.model_type return output
76
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __lowerCAmelCase : Dict = logging.get_logger(__name__) if is_vision_available(): import PIL class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = ["""pixel_values"""] def __init__( self : Optional[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = True , **UpperCamelCase__ : int , ) -> None: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = size if size is not None else {"""shortest_edge""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ , param_name="""crop_size""" ) __magic_name__ = do_resize __magic_name__ = size __magic_name__ = resample __magic_name__ = do_center_crop __magic_name__ = crop_size __magic_name__ = do_rescale __magic_name__ = rescale_factor __magic_name__ = do_normalize __magic_name__ = image_mean if image_mean is not None else 
OPENAI_CLIP_MEAN __magic_name__ = image_std if image_std is not None else OPENAI_CLIP_STD __magic_name__ = do_convert_rgb def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __magic_name__ = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__ ) return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] , ) -> Optional[int]: """simple docstring""" return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict , ) -> np.ndarray: """simple docstring""" return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : List[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ) -> PIL.Image.Image: """simple docstring""" __magic_name__ = do_resize if do_resize is not None else self.do_resize __magic_name__ = size if size is not None else self.size __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""size""" , default_to_square=UpperCamelCase__ ) __magic_name__ = resample if resample is not None else self.resample 
__magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__ = crop_size if crop_size is not None else self.crop_size __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" , default_to_square=UpperCamelCase__ ) __magic_name__ = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ = image_mean if image_mean is not None else self.image_mean __magic_name__ = image_std if image_std is not None else self.image_std __magic_name__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __magic_name__ = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __magic_name__ = [convert_to_rgb(UpperCamelCase__ ) for image in images] # All transformations expect numpy arrays. 
__magic_name__ = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_resize: __magic_name__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images] if do_center_crop: __magic_name__ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images] if do_rescale: __magic_name__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images] if do_normalize: __magic_name__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images] __magic_name__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images] __magic_name__ = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
76
1
# Fine-tunes a Transformers model for token classification (NER / POS / chunking)
# on CoNLL-2003-formatted data via the HF Trainer (train / eval / predict).
#
# NOTE(review): this file has been machine-renamed. Every local binding is
# `__magic_name__` and every parameter is `A_`, yet later statements read the
# ORIGINAL names (parser, training_args, model_args, data_args, trainer, ...),
# and the two dataclasses below are both declared `UpperCAmelCase_` but used as
# `ModelArguments` / `DataTrainingArguments`. The script cannot run as written —
# confirm against the upstream token-classification example before use.
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple

import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask

import transformers
from transformers import (
    AutoConfig,
    AutoModelForTokenClassification,
    AutoTokenizer,
    DataCollatorWithPadding,
    EvalPrediction,
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    set_seed,
)
from transformers.trainer_utils import is_main_process


__lowerCAmelCase : str = logging.getLogger(__name__)


@dataclass
class UpperCAmelCase_ :
    '''Arguments that select which pretrained model/config/tokenizer to fine-tune.'''

    a__ = field(
        metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
    a__ = field(
        default=_A , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
    a__ = field(
        default="""NER""" , metadata={"""help""": """Task type to fine tune in training (e.g. NER, POS, etc)"""} )
    a__ = field(
        default=_A , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
    a__ = field(default=_A , metadata={"""help""": """Set this flag to use fast tokenization."""} )
    # If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
    # or just modify its tokenizer_config.json.
    a__ = field(
        default=_A , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )


@dataclass
class UpperCAmelCase_ :
    '''Arguments describing the input data (data dir, label file, sequence length, caching).'''

    a__ = field(
        metadata={"""help""": """The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task."""} )
    a__ = field(
        default=_A , metadata={"""help""": """Path to a file containing all labels.
If not specified, CoNLL-2003 labels are used."""} , )
    a__ = field(
        default=1_28 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    a__ = field(
        default=_A , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )


def a__ ( ):
    '''Entry point: parse arguments, build the task/model/datasets, then train,
    evaluate and predict as requested by the TrainingArguments flags.

    Returns a dict of evaluation metrics (empty when --do_eval is off).'''
    # Arguments can come either from the CLI or from a single JSON file.
    __magic_name__ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
    if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        __magic_name__ , __magic_name__ , __magic_name__ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
    else:
        __magic_name__ , __magic_name__ , __magic_name__ = parser.parse_args_into_dataclasses()

    # Refuse to clobber a non-empty output dir unless explicitly allowed.
    if (
        os.path.exists(training_args.output_dir )
        and os.listdir(training_args.output_dir )
        and training_args.do_train
        and not training_args.overwrite_output_dir
    ):
        raise ValueError(
            f'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            """ --overwrite_output_dir to overcome.""" )

    # The concrete task (NER/POS/...) is resolved dynamically from a local
    # `tasks` module by class name.
    __magic_name__ = import_module("""tasks""" )
    try:
        __magic_name__ = getattr(A_, model_args.task_type )
        __magic_name__ = token_classification_task_clazz()
    except AttributeError:
        raise ValueError(
            f'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
            f'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )

    # Setup logging (INFO on the main process, WARN on distributed workers).
    logging.basicConfig(
        format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
        datefmt="""%m/%d/%Y %H:%M:%S""",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
    logger.warning(
        """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1 ),
        training_args.fpaa, )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank ):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("""Training/evaluation parameters %s""", A_ )

    # Set seed for reproducibility.
    set_seed(training_args.seed )

    # Prepare CONLL-2003 task: label list and id<->label maps.
    __magic_name__ = token_classification_task.get_labels(data_args.labels )
    __magic_name__ = dict(enumerate(A_ ) )
    __magic_name__ = len(A_ )

    # Load pretrained model and tokenizer.
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    __magic_name__ = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=A_,
        idalabel=A_,
        labelaid={label: i for i, label in enumerate(A_ )},
        cache_dir=model_args.cache_dir, )
    __magic_name__ = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast, )
    __magic_name__ = AutoModelForTokenClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(""".ckpt""" in model_args.model_name_or_path ),
        config=A_,
        cache_dir=model_args.cache_dir, )

    # Get datasets (built lazily only for the phases that are enabled).
    __magic_name__ = (
        TokenClassificationDataset(
            token_classification_task=A_,
            data_dir=data_args.data_dir,
            tokenizer=A_,
            labels=A_,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train, )
        if training_args.do_train
        else None
    )
    __magic_name__ = (
        TokenClassificationDataset(
            token_classification_task=A_,
            data_dir=data_args.data_dir,
            tokenizer=A_,
            labels=A_,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev, )
        if training_args.do_eval
        else None
    )

    def align_predictions(A_, A_ ) -> Tuple[List[int], List[int]]:
        # Convert logits + padded label ids into per-sentence label-string
        # lists, dropping positions marked with the CE ignore_index (-100).
        __magic_name__ = np.argmax(A_, axis=2 )
        __magic_name__ , __magic_name__ = preds.shape
        __magic_name__ = [[] for _ in range(A_ )]
        __magic_name__ = [[] for _ in range(A_ )]
        for i in range(A_ ):
            for j in range(A_ ):
                if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
                    out_label_list[i].append(label_map[label_ids[i][j]] )
                    preds_list[i].append(label_map[preds[i][j]] )
        return preds_list, out_label_list

    def compute_metrics(A_ ) -> Dict:
        # seqeval entity-level metrics over the aligned prediction/label lists.
        __magic_name__ , __magic_name__ = align_predictions(p.predictions, p.label_ids )
        return {
            "accuracy_score": accuracy_score(A_, A_ ),
            "precision": precision_score(A_, A_ ),
            "recall": recall_score(A_, A_ ),
            "f1": fa_score(A_, A_ ),
        }

    # Data collator: pad-to-multiple-of-8 only under fp16 (tensor-core friendly).
    __magic_name__ = DataCollatorWithPadding(A_, pad_to_multiple_of=8 ) if training_args.fpaa else None

    # Initialize our Trainer
    __magic_name__ = Trainer(
        model=A_,
        args=A_,
        train_dataset=A_,
        eval_dataset=A_,
        compute_metrics=A_,
        data_collator=A_, )

    # Training
    if training_args.do_train:
        trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
        trainer.save_model()
        # For convenience, we also re-save the tokenizer to the same directory,
        # so that you can share your model easily on huggingface.co/models =)
        if trainer.is_world_process_zero():
            tokenizer.save_pretrained(training_args.output_dir )

    # Evaluation
    __magic_name__ = {}
    if training_args.do_eval:
        logger.info("""*** Evaluate ***""" )
        __magic_name__ = trainer.evaluate()
        __magic_name__ = os.path.join(training_args.output_dir, """eval_results.txt""" )
        if trainer.is_world_process_zero():
            with open(A_, """w""" ) as writer:
                logger.info("""***** Eval results *****""" )
                for key, value in result.items():
                    logger.info(""" %s = %s""", A_, A_ )
                    writer.write("""%s = %s\n""" % (key, value) )
            results.update(A_ )

    # Predict
    if training_args.do_predict:
        __magic_name__ = TokenClassificationDataset(
            token_classification_task=A_,
            data_dir=data_args.data_dir,
            tokenizer=A_,
            labels=A_,
            model_type=config.model_type,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.test, )
        __magic_name__ , __magic_name__ , __magic_name__ = trainer.predict(A_ )
        __magic_name__ , __magic_name__ = align_predictions(A_, A_ )
        __magic_name__ = os.path.join(training_args.output_dir, """test_results.txt""" )
        if trainer.is_world_process_zero():
            with open(A_, """w""" ) as writer:
                for key, value in metrics.items():
                    logger.info(""" %s = %s""", A_, A_ )
                    writer.write("""%s = %s\n""" % (key, value) )
        # Save predictions
        __magic_name__ = os.path.join(training_args.output_dir, """test_predictions.txt""" )
        if trainer.is_world_process_zero():
            with open(A_, """w""" ) as writer:
                with open(os.path.join(data_args.data_dir, """test.txt""" ), """r""" ) as f:
                    token_classification_task.write_predictions_to_file(A_, A_, A_ )

    return results


def a__ ( A_ ):
    '''Spawn-compatible entry point (e.g. for xla_spawn); ignores its index arg.'''
    main()


if __name__ == "__main__":
    main()
76
# Unit tests for the Nystromformer model family: a ModelTester that builds tiny
# random configs/inputs, a ModelTesterMixin-based test class, and slow
# integration tests against the `uw-madison/nystromformer-512` checkpoint.
#
# NOTE(review): machine-renamed file — every parameter is `UpperCamelCase__`
# (duplicate argument names, a SyntaxError) and locals are `__magic_name__`,
# while statements reference the original names (model, result, config, ...).
# Cannot run as written; confirm against the upstream test file.
import unittest

from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        NystromformerForMaskedLM,
        NystromformerForMultipleChoice,
        NystromformerForQuestionAnswering,
        NystromformerForSequenceClassification,
        NystromformerForTokenClassification,
        NystromformerModel,
    )
    from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


class UpperCAmelCase_ :
    '''Builds miniature Nystromformer configs and random inputs, and runs
    per-head-type create-and-check helpers used by the test class below.'''

    def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Dict=7 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[int]=99 , UpperCamelCase__ : List[Any]=32 , UpperCamelCase__ : Any=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : str=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Dict=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : List[Any]=None , ) -> Union[str, Any]:
        """Stores the tiny-model hyperparameters used by all checks."""
        __magic_name__ = parent
        __magic_name__ = batch_size
        __magic_name__ = seq_length
        __magic_name__ = is_training
        __magic_name__ = use_input_mask
        __magic_name__ = use_token_type_ids
        __magic_name__ = use_labels
        __magic_name__ = vocab_size
        __magic_name__ = hidden_size
        __magic_name__ = num_hidden_layers
        __magic_name__ = num_attention_heads
        __magic_name__ = intermediate_size
        __magic_name__ = hidden_act
        __magic_name__ = hidden_dropout_prob
        __magic_name__ = attention_probs_dropout_prob
        __magic_name__ = max_position_embeddings
        __magic_name__ = type_vocab_size
        __magic_name__ = type_sequence_label_size
        __magic_name__ = initializer_range
        __magic_name__ = num_labels
        __magic_name__ = num_choices
        __magic_name__ = scope

    def _lowercase ( self : Any ) -> Any:
        """Builds random ids/mask/labels tensors plus a config for one test run."""
        __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        __magic_name__ = None
        if self.use_input_mask:
            __magic_name__ = random_attention_mask([self.batch_size, self.seq_length] )

        __magic_name__ = None
        if self.use_token_type_ids:
            __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        __magic_name__ = None
        __magic_name__ = None
        __magic_name__ = None
        if self.use_labels:
            __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __magic_name__ = ids_tensor([self.batch_size] , self.num_choices )

        __magic_name__ = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _lowercase ( self : Tuple ) -> Any:
        """Returns a NystromformerConfig wired from the tester's attributes."""
        return NystromformerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )

    def _lowercase ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : str ) -> Tuple:
        # Base model: checks last_hidden_state shape under three calling styles.
        __magic_name__ = NystromformerModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
        __magic_name__ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
        __magic_name__ = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _lowercase ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ) -> str:
        # Masked-LM head: logits shaped (batch, seq, vocab).
        __magic_name__ = NystromformerForMaskedLM(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _lowercase ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Any ) -> Optional[Any]:
        # QA head: start/end logits shaped (batch, seq).
        __magic_name__ = NystromformerForQuestionAnswering(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Any ) -> Optional[int]:
        # Sequence-classification head: logits shaped (batch, num_labels).
        __magic_name__ = self.num_labels
        __magic_name__ = NystromformerForSequenceClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _lowercase ( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Any ) -> Dict:
        # Token-classification head: logits shaped (batch, seq, num_labels).
        __magic_name__ = self.num_labels
        __magic_name__ = NystromformerForTokenClassification(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> Optional[Any]:
        # Multiple-choice head: inputs are tiled to (batch, num_choices, seq).
        __magic_name__ = self.num_choices
        __magic_name__ = NystromformerForMultipleChoice(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __magic_name__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __magic_name__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __magic_name__ = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def _lowercase ( self : int ) -> List[Any]:
        """Repackages prepare_config_and_inputs() into the dict shape expected
        by ModelTesterMixin's common tests."""
        __magic_name__ = self.prepare_config_and_inputs()
        (
            (__magic_name__) ,
            (__magic_name__) ,
            (__magic_name__) ,
            (__magic_name__) ,
            (__magic_name__) ,
            (__magic_name__) ,
            (__magic_name__) ,
        ) = config_and_inputs
        __magic_name__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict


@require_torch
class UpperCAmelCase_ ( _A , _A , unittest.TestCase ):
    '''Common + pipeline test-suite entry points for the Nystromformer models.'''

    a__ = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    a__ = (
        {
            """feature-extraction""": NystromformerModel,
            """fill-mask""": NystromformerForMaskedLM,
            """question-answering""": NystromformerForQuestionAnswering,
            """text-classification""": NystromformerForSequenceClassification,
            """token-classification""": NystromformerForTokenClassification,
            """zero-shot""": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    a__ = False
    a__ = False

    def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
        # setUp: build the tester and the config tester.
        __magic_name__ = NystromformerModelTester(self )
        __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )

    def _lowercase ( self : Tuple ) -> Any:
        self.config_tester.run_common_tests()

    def _lowercase ( self : Optional[Any] ) -> Any:
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def _lowercase ( self : Optional[Any] ) -> int:
        # Re-runs the base-model check for each position-embedding flavour.
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __magic_name__ = type
            self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def _lowercase ( self : List[Any] ) -> List[Any]:
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )

    def _lowercase ( self : Union[str, Any] ) -> str:
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )

    def _lowercase ( self : Dict ) -> List[Any]:
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )

    def _lowercase ( self : str ) -> int:
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )

    def _lowercase ( self : List[Any] ) -> List[str]:
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )

    @slow
    def _lowercase ( self : str ) -> Tuple:
        # Smoke-loads the first published checkpoint.
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __magic_name__ = NystromformerModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )


@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
    '''Slow integration tests against the public nystromformer-512 checkpoint.'''

    @slow
    def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
        # Checks output shape and a 3x3 slice of hidden states against
        # reference values recorded from the released checkpoint.
        __magic_name__ = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" )
        __magic_name__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )

        with torch.no_grad():
            __magic_name__ = model(UpperCamelCase__ )[0]

        __magic_name__ = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , UpperCamelCase__ )

        __magic_name__ = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )

        self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )

    @slow
    def _lowercase ( self : int ) -> str:
        # End-to-end fill-mask check: the [MASK] should decode to "capital".
        __magic_name__ = """the [MASK] of Belgium is Brussels"""

        __magic_name__ = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" )
        __magic_name__ = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" )

        __magic_name__ = tokenizer(UpperCamelCase__ , return_tensors="""pt""" )

        with torch.no_grad():
            __magic_name__ = model(encoding.input_ids ).logits

        __magic_name__ = token_logits[:, 2, :].argmax(-1 )[0]

        self.assertEqual(tokenizer.decode(UpperCamelCase__ ) , """capital""" )
76
1
import warnings

from ...utils import logging
from .image_processing_donut import DonutImageProcessor


__lowerCAmelCase : Tuple = logging.get_logger(__name__)


class UpperCAmelCase_ ( _A ):
    """Deprecated alias of the Donut image processor.

    Kept for backward compatibility: emits a ``FutureWarning`` and forwards
    all constructor arguments to the parent class. Scheduled for removal in
    Transformers v5.
    """

    def __init__( self : Optional[int] , *args : Any , **kwargs : Optional[int] ) -> None:
        """Warn about the deprecation, then delegate to the parent __init__.

        Fixes relative to the previous revision:
        1. The signature declared ``*UpperCamelCase__`` AND ``**UpperCamelCase__``
           — a duplicate argument name, which is a SyntaxError in Python. The
           var-args are renamed to ``args``/``kwargs``; their names are not part
           of the public interface, so callers are unaffected.
        2. The positional-args tuple was being passed as the warning *category*.
           ``warnings.warn`` requires a ``Warning`` subclass (or ``None``) there,
           so every instantiation raised ``TypeError`` instead of warning.
           Deprecation shims conventionally use ``FutureWarning``.
        """
        warnings.warn(
            """The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DonutImageProcessor instead.""" ,
            FutureWarning ,
        )
        super().__init__(*args , **kwargs )
76
# Configuration for the CvT (Convolutional vision Transformer) model: the
# architecture is a three-stage hierarchy, so most settings are per-stage lists.
#
# NOTE(review): machine-renamed file — every __init__ parameter is
# `UpperCamelCase__` (duplicate argument names, a SyntaxError) while the body
# reads the ORIGINAL parameter names (num_channels, patch_sizes, ...) and binds
# attributes as `__magic_name__`. Cannot run as written; confirm against the
# upstream configuration_cvt.py.
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__lowerCAmelCase : Tuple = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config.json.
__lowerCAmelCase : Union[str, Any] = {
    'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class UpperCAmelCase_ ( _A ):
    '''Stores the configuration of a CvT model. Three-element lists configure
    the three stages (patch embedding, attention geometry, dropout rates);
    extra kwargs are forwarded to the PretrainedConfig base.'''

    # model_type identifier used by AutoConfig dispatch.
    a__ = """cvt"""

    def __init__( self : Dict , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : List[Any]=[7, 3, 3] , UpperCamelCase__ : Any=[4, 2, 2] , UpperCamelCase__ : Optional[Any]=[2, 1, 1] , UpperCamelCase__ : Union[str, Any]=[64, 192, 384] , UpperCamelCase__ : Dict=[1, 3, 6] , UpperCamelCase__ : Any=[1, 2, 10] , UpperCamelCase__ : List[str]=[4.0, 4.0, 4.0] , UpperCamelCase__ : Dict=[0.0, 0.0, 0.0] , UpperCamelCase__ : Tuple=[0.0, 0.0, 0.0] , UpperCamelCase__ : Optional[Any]=[0.0, 0.0, 0.1] , UpperCamelCase__ : str=[True, True, True] , UpperCamelCase__ : Optional[Any]=[False, False, True] , UpperCamelCase__ : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase__ : List[Any]=[3, 3, 3] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : Optional[int]=[2, 2, 2] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : List[str]=[1, 1, 1] , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : int=1E-12 , **UpperCamelCase__ : int , ) -> Dict:
        """Copies every per-stage hyperparameter onto the instance."""
        super().__init__(**UpperCamelCase__ )
        __magic_name__ = num_channels
        __magic_name__ = patch_sizes
        __magic_name__ = patch_stride
        __magic_name__ = patch_padding
        __magic_name__ = embed_dim
        __magic_name__ = num_heads
        __magic_name__ = depth
        __magic_name__ = mlp_ratio
        __magic_name__ = attention_drop_rate
        __magic_name__ = drop_rate
        __magic_name__ = drop_path_rate
        __magic_name__ = qkv_bias
        __magic_name__ = cls_token
        __magic_name__ = qkv_projection_method
        __magic_name__ = kernel_qkv
        __magic_name__ = padding_kv
        __magic_name__ = stride_kv
        __magic_name__ = padding_q
        __magic_name__ = stride_q
        __magic_name__ = initializer_range
        __magic_name__ = layer_norm_eps
76
1
# Package __init__ for GPT-NeoX: lazy-import structure guarded by optional
# tokenizers/torch availability checks.
#
# NOTE(review): machine-renamed file — the import-structure dict and the
# optional-backend lists are all rebound to `__lowerCAmelCase` instead of
# being stored under keys of a single `_import_structure` dict, yet
# `_import_structure` is passed to _LazyModule below. Cannot work as written;
# confirm against the upstream __init__.py.
from typing import TYPE_CHECKING

from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable


__lowerCAmelCase : Tuple = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}

# Fast tokenizer only when the `tokenizers` backend is installed.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : str = ['GPTNeoXTokenizerFast']

# Modeling classes only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : Union[str, Any] = [
        'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
        'GPTNeoXForCausalLM',
        'GPTNeoXForQuestionAnswering',
        'GPTNeoXForSequenceClassification',
        'GPTNeoXForTokenClassification',
        'GPTNeoXLayer',
        'GPTNeoXModel',
        'GPTNeoXPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_gpt_neox import (
            GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
            GPTNeoXForCausalLM,
            GPTNeoXForQuestionAnswering,
            GPTNeoXForSequenceClassification,
            GPTNeoXForTokenClassification,
            GPTNeoXLayer,
            GPTNeoXModel,
            GPTNeoXPreTrainedModel,
        )

else:
    # At runtime, replace this module with a lazy proxy.
    import sys

    __lowerCAmelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
# Package __init__ for CANINE: lazy-import structure with a torch-guarded
# modeling section.
#
# NOTE(review): machine-renamed — the torch-only class list is rebound to
# `__lowerCAmelCase` rather than stored in the import-structure dict, yet
# `_import_structure` is passed to _LazyModule below. Cannot work as written;
# confirm against the upstream __init__.py.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available


__lowerCAmelCase : List[str] = {
    'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
    'tokenization_canine': ['CanineTokenizer'],
}

# Modeling classes only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    __lowerCAmelCase : Optional[Any] = [
        'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
        'CanineForMultipleChoice',
        'CanineForQuestionAnswering',
        'CanineForSequenceClassification',
        'CanineForTokenClassification',
        'CanineLayer',
        'CanineModel',
        'CaninePreTrainedModel',
        'load_tf_weights_in_canine',
    ]

if TYPE_CHECKING:
    # Real imports only for static type checkers.
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    # At runtime, replace this module with a lazy proxy.
    import sys

    __lowerCAmelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
1
# Converts original Segment-Anything (SAM) checkpoints to the HF `SamModel`
# format and sanity-checks IoU scores on a reference image (CUDA required).
#
# NOTE(review): machine-renamed file — locals are `__magic_name__`, call
# arguments are `A_`, while later statements read the original names
# (model_name, hf_model, processor, scores, ...). Cannot run as written;
# confirm against the upstream convert_sam_to_hf.py.
import argparse
import re

import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    SamConfig,
    SamImageProcessor,
    SamModel,
    SamProcessor,
    SamVisionConfig,
)


# Original SAM state-dict key fragments -> HF SamModel key fragments.
__lowerCAmelCase : str = {
    'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
    'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
    'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
    'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
    'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
    'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
    'mask_downscaling.0': 'mask_embed.conv1',
    'mask_downscaling.1': 'mask_embed.layer_norm1',
    'mask_downscaling.3': 'mask_embed.conv2',
    'mask_downscaling.4': 'mask_embed.layer_norm2',
    'mask_downscaling.6': 'mask_embed.conv3',
    'point_embeddings': 'point_embed',
    'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
    'image_encoder': 'vision_encoder',
    'neck.0': 'neck.conv1',
    'neck.1': 'neck.layer_norm1',
    'neck.2': 'neck.conv2',
    'neck.3': 'neck.layer_norm2',
    'patch_embed.proj': 'patch_embed.projection',
    '.norm': '.layer_norm',
    'blocks': 'layers',
}


def a__ ( A_ ):
    '''Renames the keys of an original SAM state dict to the HF layout and
    drops the pixel-normalisation buffers; returns the new state dict.'''
    __magic_name__ = {}
    # pixel_mean/pixel_std live in the image processor on the HF side.
    state_dict.pop("""pixel_mean""", A_ )
    state_dict.pop("""pixel_std""", A_ )

    # Hypernetwork MLP layers need index-dependent renaming (see below).
    __magic_name__ = R""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"""

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                __magic_name__ = key.replace(A_, A_ )

        if re.match(A_, A_ ):
            __magic_name__ = int(re.match(A_, A_ ).group(2 ) )
            # layers.0 -> proj_in, layers.1 -> layers.0, layers.2 -> proj_out
            if layer_nb == 0:
                __magic_name__ = key.replace("""layers.0""", """proj_in""" )
            elif layer_nb == 1:
                __magic_name__ = key.replace("""layers.1""", """layers.0""" )
            elif layer_nb == 2:
                __magic_name__ = key.replace("""layers.2""", """proj_out""" )

        __magic_name__ = value

    __magic_name__ = model_state_dict[
        """prompt_encoder.shared_embedding.positional_embedding"""
    ]

    return model_state_dict


def a__ ( A_, A_, A_, A_="ybelkada/segment-anything" ):
    '''Downloads the named SAM checkpoint, builds the matching SamConfig
    (base/large/huge), loads the converted weights into a SamModel and
    verifies IoU scores against hard-coded reference values on CUDA.'''
    __magic_name__ = hf_hub_download(A_, f'''checkpoints/{model_name}.pth''' )

    # Vision-encoder geometry differs per variant; "sam_vit_b" uses defaults.
    if "sam_vit_b" in model_name:
        __magic_name__ = SamConfig()
    elif "sam_vit_l" in model_name:
        __magic_name__ = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23], )
        __magic_name__ = SamConfig(
            vision_config=A_, )
    elif "sam_vit_h" in model_name:
        __magic_name__ = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31], )
        __magic_name__ = SamConfig(
            vision_config=A_, )

    __magic_name__ = torch.load(A_, map_location="""cpu""" )
    __magic_name__ = replace_keys(A_ )

    __magic_name__ = SamImageProcessor()
    __magic_name__ = SamProcessor(image_processor=A_ )
    __magic_name__ = SamModel(A_ )

    hf_model.load_state_dict(A_ )
    __magic_name__ = hf_model.to("""cuda""" )

    # Reference image used for the numeric sanity checks below.
    __magic_name__ = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"""

    __magic_name__ = Image.open(requests.get(A_, stream=A_ ).raw ).convert("""RGB""" )
    __magic_name__ = [[[400, 650]]]
    __magic_name__ = [[1]]

    __magic_name__ = processor(images=np.array(A_ ), return_tensors="""pt""" ).to("""cuda""" )

    with torch.no_grad():
        __magic_name__ = hf_model(**A_ )
    __magic_name__ = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        # Exact-match float asserts: reference scores recorded for the
        # huge checkpoint only.
        assert scores[-1].item() == 0.579890251159668

        __magic_name__ = processor(
            images=np.array(A_ ), input_points=A_, input_labels=A_, return_tensors="""pt""" ).to("""cuda""" )

        with torch.no_grad():
            __magic_name__ = hf_model(**A_ )
        __magic_name__ = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9712603092193604

        __magic_name__ = ((75, 275, 1725, 850),)

        __magic_name__ = processor(images=np.array(A_ ), input_boxes=A_, return_tensors="""pt""" ).to("""cuda""" )

        with torch.no_grad():
            __magic_name__ = hf_model(**A_ )
        __magic_name__ = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        __magic_name__ = [[[400, 650], [800, 650]]]
        __magic_name__ = [[1, 1]]

        __magic_name__ = processor(
            images=np.array(A_ ), input_points=A_, input_labels=A_, return_tensors="""pt""" ).to("""cuda""" )

        with torch.no_grad():
            __magic_name__ = hf_model(**A_ )
        __magic_name__ = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9936047792434692


if __name__ == "__main__":
    __lowerCAmelCase : int = argparse.ArgumentParser()
    __lowerCAmelCase : Optional[Any] = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
    parser.add_argument(
        '--model_name',
        default='sam_vit_h_4b8939',
        choices=choices,
        type=str,
        help='Path to hf config.json of model to convert', )
    parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--push_to_hub',
        action='store_true',
        help='Whether to push the model and processor to the hub after converting', )
    parser.add_argument(
        '--model_hub_id',
        default='ybelkada/segment-anything',
        choices=choices,
        type=str,
        help='Path to hf config.json of model to convert', )

    __lowerCAmelCase : Dict = parser.parse_args()

    convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
import argparse

import torch

from transformers import (
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForAudioFrameClassification,
    Wav2Vec2ForSequenceClassification,
    Wav2Vec2ForXVector,
    logging,
)


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def convert_classification(base_model_name, hf_config, downstream_dict):
    """Load a sequence-classification model and copy the S3PRL downstream head weights into it."""
    model = Wav2Vec2ForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model


def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Load an audio-frame-classification (diarization) model and copy the downstream head."""
    model = Wav2Vec2ForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model


def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Load an XVector (speaker verification) model and copy the downstream TDNN/linear weights."""
    model = Wav2Vec2ForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f"model.framelevel_feature_extractor.module.{i}.kernel.weight"
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f"model.framelevel_feature_extractor.module.{i}.kernel.bias"]

    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model


@torch.no_grad()
def convert_s3prl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an S3PRL downstream checkpoint into a HuggingFace model and save it.

    Args:
        base_model_name: name of the pretrained wav2vec2 base model on the hub.
        config_path: path/name of the HF classifier config; its ``architectures[0]``
            entry selects which conversion routine is used.
        checkpoint_path: path to the S3PRL checkpoint (keys ``Downstream``/``Featurizer``).
        model_dump_path: output directory for the converted model + feature extractor.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")

    downstream_dict = checkpoint["Downstream"]

    hf_config = Wav2Vec2Config.from_pretrained(config_path)
    hf_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )

    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f"S3PRL weights conversion is not supported for {arch}")

    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]

    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--base_model_name", default=None, type=str, help="Name of the huggingface pretrained base model."
    )
    parser.add_argument("--config_path", default=None, type=str, help="Path to the huggingface classifier config.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the s3prl checkpoint.")
    parser.add_argument("--model_dump_path", default=None, type=str, help="Path to the final converted model.")
    args = parser.parse_args()
    convert_s3prl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
76
1
from dataclasses import dataclass, field
from typing import Tuple

from ..utils import cached_property, is_tf_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments


if is_tf_available():
    import tensorflow as tf


logger = logging.get_logger(__name__)


@dataclass
class TensorFlowBenchmarkArguments(BenchmarkArguments):
    """TensorFlow-specific benchmark arguments (TPU name, device index, eager mode, XLA)."""

    # Old-style negated flags that are rewritten to their positive counterparts in __init__.
    deprecated_args = [
        "no_inference",
        "no_cuda",
        "no_tpu",
        "no_speed",
        "no_memory",
        "no_env_print",
        "no_multi_process",
    ]

    def __init__(self, **kwargs):
        """Translate deprecated ``no_*`` kwargs into their positive form, then delegate to the base class."""
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]  # strip the leading "no_"
                kwargs[positive_arg] = not kwargs.pop(deprecated_arg)
                logger.warning(
                    f"{deprecated_arg} is depreciated. Please use --no-{positive_arg} or"
                    f" {positive_arg}={kwargs[positive_arg]}"
                )
        self.tpu_name = kwargs.pop("tpu_name", self.tpu_name)
        self.device_idx = kwargs.pop("device_idx", self.device_idx)
        self.eager_mode = kwargs.pop("eager_mode", self.eager_mode)
        self.use_xla = kwargs.pop("use_xla", self.use_xla)
        super().__init__(**kwargs)

    tpu_name: str = field(
        default=None,
        metadata={"help": "Name of TPU"},
    )
    device_idx: int = field(
        default=0,
        metadata={"help": "CPU / GPU device index. Defaults to 0."},
    )
    eager_mode: bool = field(default=False, metadata={"help": "Benchmark models in eager model."})
    use_xla: bool = field(
        default=False,
        metadata={
            "help": "Benchmark models using XLA JIT compilation. Note that `eager_model` has to be set to `False`."
        },
    )

    @cached_property
    def _setup_tpu(self) -> Tuple["tf.distribute.cluster_resolver.TPUClusterResolver"]:
        """Resolve the TPU cluster if TPU benchmarking is enabled; None when unavailable."""
        requires_backends(self, ["tf"])
        tpu = None
        if self.tpu:
            try:
                if self.tpu_name:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver(self.tpu_name)
                else:
                    tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
            except ValueError:
                tpu = None
        return tpu

    @cached_property
    def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", "tf.distribute.cluster_resolver.TPUClusterResolver"]:
        """Build the tf.distribute strategy for the selected device (TPU, single GPU, or CPU)."""
        requires_backends(self, ["tf"])
        if self.is_tpu:
            tf.config.experimental_connect_to_cluster(self._setup_tpu)
            tf.tpu.experimental.initialize_tpu_system(self._setup_tpu)

            strategy = tf.distribute.TPUStrategy(self._setup_tpu)
        else:
            # currently no multi gpu is allowed
            if self.is_gpu:
                # TODO: Currently only single GPU is supported
                tf.config.set_visible_devices(self.gpu_list[self.device_idx], "GPU")
                strategy = tf.distribute.OneDeviceStrategy(device=f"/gpu:{self.device_idx}")
            else:
                tf.config.set_visible_devices([], "GPU")  # disable GPU
                strategy = tf.distribute.OneDeviceStrategy(device=f"/cpu:{self.device_idx}")

        return strategy

    @property
    def is_tpu(self) -> bool:
        requires_backends(self, ["tf"])
        return self._setup_tpu is not None

    @property
    def strategy(self) -> "tf.distribute.Strategy":
        requires_backends(self, ["tf"])
        return self._setup_strategy

    @property
    def gpu_list(self):
        requires_backends(self, ["tf"])
        return tf.config.list_physical_devices("GPU")

    @property
    def n_gpu(self) -> int:
        requires_backends(self, ["tf"])
        if self.cuda:
            return len(self.gpu_list)
        return 0

    @property
    def is_gpu(self) -> bool:
        return self.n_gpu > 0
76
import pytest

from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader

from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases


def _check_text_dataset(dataset, expected_features):
    """Shared assertions: 4 rows, single "text" column, expected dtypes."""
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    # Parenthesized: the unparenthesized form `a == b if b else "train"` is
    # vacuously truthy when split is None and never checks the default split.
    assert dataset.split == (split if split else "train")


@pytest.mark.parametrize("path_type", [str, list])
def test_dataset_from_text_path_type(path_type, text_path, tmp_path):
    if issubclass(path_type, str):
        path = text_path
    elif issubclass(path_type, list):
        path = [text_path]
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


def _check_text_datasetdict(dataset_dict, expected_features, splits=("train",)):
    """Shared assertions for DatasetDict results, one split at a time."""
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 1
        assert dataset.column_names == ["text"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_datasetdict_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader({"train": text_path}, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    # CSV file loses col_1 string dtype information: default now is "int64" instead of "string"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
76
1
import unittest

import numpy as np

from transformers.testing_utils import is_flaky, require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DonutImageProcessor


class DonutImageProcessingTester(unittest.TestCase):
    """Builds the kwargs dict used to instantiate a DonutImageProcessor under test."""

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_thumbnail=True,
        do_align_axis=False,
        do_pad=True,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size if size is not None else {"height": 18, "width": 20}
        self.do_thumbnail = do_thumbnail
        self.do_align_axis = do_align_axis
        self.do_pad = do_pad
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_thumbnail": self.do_thumbnail,
            "do_align_long_axis": self.do_align_axis,
            "do_pad": self.do_pad,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class DonutImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DonutImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DonutImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_thumbnail"))
        self.assertTrue(hasattr(image_processing, "do_align_long_axis"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 20})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

        # Previous config had dimensions in (width, height) order
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=(42, 84))
        self.assertEqual(image_processor.size, {"height": 84, "width": 42})

    def test_batch_feature(self):
        pass

    @is_flaky()
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    @is_flaky()
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
76
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


# NOTE(review): the concrete model this processor belongs to is not recoverable from
# this chunk alone; the class name is kept as-is so any external references still work.
class UpperCAmelCase_(BaseImageProcessor):
    """Standard image processor: resize (shortest edge 256) -> center crop (224x224)
    -> rescale (1/255) -> normalize with ImageNet-standard mean/std."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge matches ``size["shortest_edge"]``, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (typically 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize with per-channel ``mean`` and ``std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> Dict:
        """Run the full pipeline over one image or a batch; per-call kwargs override the defaults."""
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
76
1
import argparse
import pickle

import numpy as np
import torch
from torch import nn

from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging


logging.set_verbosity_info()


def set_param(torch_layer, weight, bias=None):
    """Copy `weight` (and optional `bias`) into `torch_layer`, checking shapes first."""
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)


def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """Copy trax LSH-attention weights (query_key, value, output dense) into a torch layer."""
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])

    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """Copy trax local-attention weights (query, key, value, output dense) into a torch layer."""
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])

    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )


def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Copy one trax reformer block (layernorms, attention, feed-forward) into a torch block."""
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )

    # lsh weights + output
    attn_weights = weights[0][1]
    # LSH attention has 3 weight tensors, local attention has 4.
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)

    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]

    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]

    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )

    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )

    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )


def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Copy a complete trax reformer weight tree into ``torch_model``."""
    torch_model_reformer = torch_model.reformer

    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )

    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))

    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        # Each torch block consumes 4 consecutive trax weight groups.
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)

    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )

    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )


def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Build a ReformerModelWithLMHead from ``config_file``, load trax weights, save as torch."""
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    # SECURITY: pickle.load runs arbitrary code — only use on checkpoints you trust.
    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
76
import math def a__ ( A_, A_ = 0, A_ = 0 ): '''simple docstring''' __magic_name__ = end or len(A_ ) for i in range(A_, A_ ): __magic_name__ = i __magic_name__ = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: __magic_name__ = array[temp_index - 1] temp_index -= 1 __magic_name__ = temp_index_value return array def a__ ( A_, A_, A_ ): # Max Heap '''simple docstring''' __magic_name__ = index __magic_name__ = 2 * index + 1 # Left Node __magic_name__ = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: __magic_name__ = left_index if right_index < heap_size and array[largest] < array[right_index]: __magic_name__ = right_index if largest != index: __magic_name__ , __magic_name__ = array[largest], array[index] heapify(A_, A_, A_ ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = len(A_ ) for i in range(n // 2, -1, -1 ): heapify(A_, A_, A_ ) for i in range(n - 1, 0, -1 ): __magic_name__ , __magic_name__ = array[0], array[i] heapify(A_, 0, A_ ) return array def a__ ( A_, A_, A_, A_ ): '''simple docstring''' if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def a__ ( A_, A_, A_, A_ ): '''simple docstring''' __magic_name__ = low __magic_name__ = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i __magic_name__ , __magic_name__ = array[j], array[i] i += 1 def a__ ( A_ ): '''simple docstring''' if len(A_ ) == 0: return array __magic_name__ = 2 * math.ceil(math.loga(len(A_ ) ) ) __magic_name__ = 16 return intro_sort(A_, 0, len(A_ ), A_, A_ ) def a__ ( A_, A_, A_, A_, A_ ): '''simple docstring''' while end - start > size_threshold: if max_depth == 0: return heap_sort(A_ ) max_depth -= 1 __magic_name__ = median_of_a(A_, A_, 
start + ((end - start) // 2) + 1, end - 1 ) __magic_name__ = partition(A_, A_, A_, A_ ) intro_sort(A_, A_, A_, A_, A_ ) __magic_name__ = p return insertion_sort(A_, A_, A_ ) if __name__ == "__main__": import doctest doctest.testmod() __lowerCAmelCase : str = input('Enter numbers separated by a comma : ').strip() __lowerCAmelCase : List[Any] = [float(item) for item in user_input.split(',')] print(sort(unsorted))
76
1
from ...utils import is_torch_available, is_transformers_available if is_transformers_available() and is_torch_available(): from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
76
import argparse import json import re from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( MobileNetVaConfig, MobileNetVaForImageClassification, MobileNetVaImageProcessor, load_tf_weights_in_mobilenet_va, ) from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase : str = logging.get_logger(__name__) def a__ ( A_ ): '''simple docstring''' __magic_name__ = MobileNetVaConfig(layer_norm_eps=0.001 ) if "_quant" in model_name: raise ValueError("""Quantized models are not supported.""" ) __magic_name__ = re.match(R"""^mobilenet_v1_([^_]*)_([^_]*)$""", A_ ) if matches: __magic_name__ = float(matches[1] ) __magic_name__ = int(matches[2] ) # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of # the usual 1000. The first class (index 0) is "background". __magic_name__ = 1001 __magic_name__ = """imagenet-1k-id2label.json""" __magic_name__ = """huggingface/label-files""" __magic_name__ = json.load(open(hf_hub_download(A_, A_, repo_type="""dataset""" ), """r""" ) ) __magic_name__ = {int(A_ ) + 1: v for k, v in idalabel.items()} __magic_name__ = """background""" __magic_name__ = idalabel __magic_name__ = {v: k for k, v in idalabel.items()} return config def a__ ( ): '''simple docstring''' __magic_name__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" __magic_name__ = Image.open(requests.get(A_, stream=A_ ).raw ) return im @torch.no_grad() def a__ ( A_, A_, A_, A_=False ): '''simple docstring''' __magic_name__ = get_mobilenet_va_config(A_ ) # Load 🤗 model __magic_name__ = MobileNetVaForImageClassification(A_ ).eval() # Load weights from TensorFlow checkpoint load_tf_weights_in_mobilenet_va(A_, A_, A_ ) # Check outputs on an image, prepared by MobileNetV1ImageProcessor __magic_name__ = MobileNetVaImageProcessor( crop_size={"""width""": config.image_size, """height""": config.image_size}, size={"""shortest_edge""": 
config.image_size + 32}, ) __magic_name__ = image_processor(images=prepare_img(), return_tensors="""pt""" ) __magic_name__ = model(**A_ ) __magic_name__ = outputs.logits assert logits.shape == (1, 1001) if model_name == "mobilenet_v1_1.0_224": __magic_name__ = torch.tensor([-4.1739, -1.1233, 3.1205] ) elif model_name == "mobilenet_v1_0.75_192": __magic_name__ = torch.tensor([-3.9440, -2.3141, -0.3333] ) else: __magic_name__ = None if expected_logits is not None: assert torch.allclose(logits[0, :3], A_, atol=1e-4 ) Path(A_ ).mkdir(exist_ok=A_ ) print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(A_ ) if push_to_hub: print("""Pushing to the hub...""" ) __magic_name__ = """google/""" + model_name image_processor.push_to_hub(A_ ) model.push_to_hub(A_ ) if __name__ == "__main__": __lowerCAmelCase : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default='mobilenet_v1_1.0_224', type=str, help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.', ) parser.add_argument( '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).' ) parser.add_argument( '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __lowerCAmelCase : str = parser.parse_args() convert_movilevit_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
76
1
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments def a__ ( ): '''simple docstring''' __magic_name__ = HfArgumentParser(A_ ) __magic_name__ = parser.parse_args_into_dataclasses()[0] __magic_name__ = TensorFlowBenchmark(args=A_ ) try: __magic_name__ = parser.parse_args_into_dataclasses()[0] except ValueError as e: __magic_name__ = """Arg --no_{0} is no longer used, please use --no-{0} instead.""" __magic_name__ = """ """.join(str(A_ ).split(""" """ )[:-1] ) __magic_name__ = """""" __magic_name__ = eval(str(A_ ).split(""" """ )[-1] ) __magic_name__ = [] for arg in depreciated_args: # arg[2:] removes '--' if arg[2:] in TensorFlowBenchmark.deprecated_args: # arg[5:] removes '--no_' full_error_msg += arg_error_msg.format(arg[5:] ) else: wrong_args.append(A_ ) if len(A_ ) > 0: __magic_name__ = full_error_msg + begin_error_msg + str(A_ ) raise ValueError(A_ ) benchmark.run() if __name__ == "__main__": main()
76
import collections import importlib.util import os import re from pathlib import Path __lowerCAmelCase : int = 'src/transformers' # Matches is_xxx_available() __lowerCAmelCase : Optional[int] = re.compile(R'is\_([a-z_]*)_available()') # Catches a one-line _import_struct = {xxx} __lowerCAmelCase : Dict = re.compile(R'^_import_structure\s+=\s+\{([^\}]+)\}') # Catches a line with a key-values pattern: "bla": ["foo", "bar"] __lowerCAmelCase : int = re.compile(R'\s+"\S*":\s+\[([^\]]*)\]') # Catches a line if not is_foo_available __lowerCAmelCase : Optional[Any] = re.compile(R'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)') # Catches a line _import_struct["bla"].append("foo") __lowerCAmelCase : Optional[Any] = re.compile(R'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)') # Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"] __lowerCAmelCase : Dict = re.compile(R'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]') # Catches a line with an object between quotes and a comma: "MyModel", __lowerCAmelCase : List[str] = re.compile('^\s+"([^"]+)",') # Catches a line with objects between brackets only: ["foo", "bar"], __lowerCAmelCase : Optional[int] = re.compile('^\s+\[([^\]]+)\]') # Catches a line with from foo import bar, bla, boo __lowerCAmelCase : List[str] = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') # Catches a line with try: __lowerCAmelCase : int = re.compile(R'^\s*try:') # Catches a line with else: __lowerCAmelCase : Tuple = re.compile(R'^\s*else:') def a__ ( A_ ): '''simple docstring''' if _re_test_backend.search(A_ ) is None: return None __magic_name__ = [b[0] for b in _re_backend.findall(A_ )] backends.sort() return "_and_".join(A_ ) def a__ ( A_ ): '''simple docstring''' with open(A_, """r""", encoding="""utf-8""", newline="""\n""" ) as f: __magic_name__ = f.readlines() __magic_name__ = 0 while line_index < len(A_ ) and not lines[line_index].startswith("""_import_structure = {""" ): line_index 
+= 1 # If this is a traditional init, just return. if line_index >= len(A_ ): return None # First grab the objects without a specific backend in _import_structure __magic_name__ = [] while not lines[line_index].startswith("""if TYPE_CHECKING""" ) and find_backend(lines[line_index] ) is None: __magic_name__ = lines[line_index] # If we have everything on a single line, let's deal with it. if _re_one_line_import_struct.search(A_ ): __magic_name__ = _re_one_line_import_struct.search(A_ ).groups()[0] __magic_name__ = re.findall("""\[([^\]]+)\]""", A_ ) for imp in imports: objects.extend([obj[1:-1] for obj in imp.split(""", """ )] ) line_index += 1 continue __magic_name__ = _re_import_struct_key_value.search(A_ ) if single_line_import_search is not None: __magic_name__ = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(""", """ ) if len(A_ ) > 0] objects.extend(A_ ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) line_index += 1 __magic_name__ = {"""none""": objects} # Let's continue with backend-specific objects in _import_structure while not lines[line_index].startswith("""if TYPE_CHECKING""" ): # If the line is an if not is_backend_available, we grab all objects associated. 
__magic_name__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __magic_name__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __magic_name__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 4 ): __magic_name__ = lines[line_index] if _re_import_struct_add_one.search(A_ ) is not None: objects.append(_re_import_struct_add_one.search(A_ ).groups()[0] ) elif _re_import_struct_add_many.search(A_ ) is not None: __magic_name__ = _re_import_struct_add_many.search(A_ ).groups()[0].split(""", """ ) __magic_name__ = [obj[1:-1] for obj in imports if len(A_ ) > 0] objects.extend(A_ ) elif _re_between_brackets.search(A_ ) is not None: __magic_name__ = _re_between_brackets.search(A_ ).groups()[0].split(""", """ ) __magic_name__ = [obj[1:-1] for obj in imports if len(A_ ) > 0] objects.extend(A_ ) elif _re_quote_object.search(A_ ) is not None: objects.append(_re_quote_object.search(A_ ).groups()[0] ) elif line.startswith(""" """ * 8 + """\"""" ): objects.append(line[9:-3] ) elif line.startswith(""" """ * 12 + """\"""" ): objects.append(line[13:-3] ) line_index += 1 __magic_name__ = objects else: line_index += 1 # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend __magic_name__ = [] while ( line_index < len(A_ ) and find_backend(lines[line_index] ) is None and not lines[line_index].startswith("""else""" ) ): __magic_name__ = lines[line_index] __magic_name__ = _re_import.search(A_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 8 ): objects.append(line[8:-2] ) line_index += 1 __magic_name__ = {"""none""": objects} # Let's 
continue with backend-specific objects while line_index < len(A_ ): # If the line is an if is_backend_available, we grab all objects associated. __magic_name__ = find_backend(lines[line_index] ) # Check if the backend declaration is inside a try block: if _re_try.search(lines[line_index - 1] ) is None: __magic_name__ = None if backend is not None: line_index += 1 # Scroll until we hit the else block of try-except-else while _re_else.search(lines[line_index] ) is None: line_index += 1 line_index += 1 __magic_name__ = [] # Until we unindent, add backend objects to the list while len(lines[line_index] ) <= 1 or lines[line_index].startswith(""" """ * 8 ): __magic_name__ = lines[line_index] __magic_name__ = _re_import.search(A_ ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(""", """ ) ) elif line.startswith(""" """ * 12 ): objects.append(line[12:-2] ) line_index += 1 __magic_name__ = objects else: line_index += 1 return import_dict_objects, type_hint_objects def a__ ( A_, A_ ): '''simple docstring''' def find_duplicates(A_ ): return [k for k, v in collections.Counter(A_ ).items() if v > 1] if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ): return ["Both sides of the init do not have the same backends!"] __magic_name__ = [] for key in import_dict_objects.keys(): __magic_name__ = find_duplicates(import_dict_objects[key] ) if duplicate_imports: errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''' ) __magic_name__ = find_duplicates(type_hint_objects[key] ) if duplicate_type_hints: errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''' ) if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ): __magic_name__ = """base imports""" if key == """none""" else f'''{key} backend''' errors.append(f'''Differences for {name}:''' ) for a in type_hint_objects[key]: if a not in import_dict_objects[key]: errors.append(f''' {a} 
in TYPE_HINT but not in _import_structure.''' ) for a in import_dict_objects[key]: if a not in type_hint_objects[key]: errors.append(f''' {a} in _import_structure but not in TYPE_HINT.''' ) return errors def a__ ( ): '''simple docstring''' __magic_name__ = [] for root, _, files in os.walk(A_ ): if "__init__.py" in files: __magic_name__ = os.path.join(A_, """__init__.py""" ) __magic_name__ = parse_init(A_ ) if objects is not None: __magic_name__ = analyze_results(*A_ ) if len(A_ ) > 0: __magic_name__ = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}''' failures.append("""\n""".join(A_ ) ) if len(A_ ) > 0: raise ValueError("""\n\n""".join(A_ ) ) def a__ ( ): '''simple docstring''' __magic_name__ = [] for path, directories, files in os.walk(A_ ): for folder in directories: # Ignore private modules if folder.startswith("""_""" ): directories.remove(A_ ) continue # Ignore leftovers from branches (empty folders apart from pycache) if len(list((Path(A_ ) / folder).glob("""*.py""" ) ) ) == 0: continue __magic_name__ = str((Path(A_ ) / folder).relative_to(A_ ) ) __magic_name__ = short_path.replace(os.path.sep, """.""" ) submodules.append(A_ ) for fname in files: if fname == "__init__.py": continue __magic_name__ = str((Path(A_ ) / fname).relative_to(A_ ) ) __magic_name__ = short_path.replace(""".py""", """""" ).replace(os.path.sep, """.""" ) if len(submodule.split(""".""" ) ) == 1: submodules.append(A_ ) return submodules __lowerCAmelCase : Dict = [ 'convert_pytorch_checkpoint_to_tf2', 'modeling_flax_pytorch_utils', ] def a__ ( ): '''simple docstring''' __magic_name__ = importlib.util.spec_from_file_location( """transformers""", os.path.join(A_, """__init__.py""" ), submodule_search_locations=[PATH_TO_TRANSFORMERS], ) __magic_name__ = spec.loader.load_module() __magic_name__ = [ module for module in get_transformers_submodules() if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys() ] if len(A_ ) > 0: 
__magic_name__ = """\n""".join(f'''- {module}''' for module in module_not_registered ) raise ValueError( """The following submodules are not properly registered in the main init of Transformers:\n""" f'''{list_of_modules}\n''' """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.""" ) if __name__ == "__main__": check_all_inits() check_submodules()
76
1
from pathlib import Path import numpy as np from PIL import Image def a__ ( A_ ): '''simple docstring''' __magic_name__ , __magic_name__ , __magic_name__ = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 0.1140 * b def a__ ( A_ ): '''simple docstring''' return (gray > 127) & (gray <= 255) def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = np.zeros_like(A_ ) __magic_name__ = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image __magic_name__ = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): __magic_name__ = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() __magic_name__ = int(summation > 0 ) return output if __name__ == "__main__": # read original image __lowerCAmelCase : List[str] = Path(__file__).resolve().parent / 'image_data' / 'lena.jpg' __lowerCAmelCase : Tuple = np.array(Image.open(lena_path)) # kernel to be applied __lowerCAmelCase : List[Any] = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) __lowerCAmelCase : str = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image __lowerCAmelCase : Tuple = Image.fromarray(output).convert('RGB') pil_img.save('result_dilation.png')
76
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) __lowerCAmelCase : List[Any] = { 'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """sew-d""" def __init__( self : List[str] , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : Optional[int]=768 , UpperCamelCase__ : Tuple=12 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : int=3072 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : Any=256 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : str=("p2c", "c2p") , UpperCamelCase__ : List[Any]="layer_norm" , UpperCamelCase__ : int="gelu_python" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : int=0.0 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[int]=1E-7 , UpperCamelCase__ : List[Any]=1E-5 , UpperCamelCase__ : List[str]="group" , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : Tuple=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , UpperCamelCase__ : str=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCamelCase__ : Optional[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Optional[int]=128 , UpperCamelCase__ : Tuple=16 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Dict=0.05 , UpperCamelCase__ : str=10 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Dict=10 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : List[Any]="mean" , UpperCamelCase__ : int=False , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : 
Optional[int]=256 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=2 , **UpperCamelCase__ : str , ) -> Dict: """simple docstring""" super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ ) __magic_name__ = hidden_size __magic_name__ = feat_extract_norm __magic_name__ = feat_extract_activation __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = conv_bias __magic_name__ = num_conv_pos_embeddings __magic_name__ = num_conv_pos_embedding_groups __magic_name__ = len(self.conv_dim ) __magic_name__ = num_hidden_layers __magic_name__ = intermediate_size __magic_name__ = squeeze_factor __magic_name__ = max_position_embeddings __magic_name__ = position_buckets __magic_name__ = share_att_key __magic_name__ = relative_attention __magic_name__ = norm_rel_ebd __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = hidden_act __magic_name__ = num_attention_heads __magic_name__ = hidden_dropout __magic_name__ = attention_dropout __magic_name__ = activation_dropout __magic_name__ = feat_proj_dropout __magic_name__ = final_dropout __magic_name__ = layer_norm_eps __magic_name__ = feature_layer_norm_eps __magic_name__ = initializer_range __magic_name__ = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: 
https://arxiv.org/abs/1904.08779 __magic_name__ = apply_spec_augment __magic_name__ = mask_time_prob __magic_name__ = mask_time_length __magic_name__ = mask_time_min_masks __magic_name__ = mask_feature_prob __magic_name__ = mask_feature_length __magic_name__ = mask_feature_min_masks # ctc loss __magic_name__ = ctc_loss_reduction __magic_name__ = ctc_zero_infinity # sequence classification __magic_name__ = use_weighted_layer_sum __magic_name__ = classifier_proj_size @property def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
76
1
import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , ) @pytest.mark.usefixtures("""sm_env""" ) @parameterized_class( [ { """framework""": """pytorch""", """script""": """run_glue.py""", """model_name_or_path""": """distilbert-base-cased""", """instance_type""": """ml.p3.16xlarge""", """results""": {"""train_runtime""": 6_50, """eval_accuracy""": 0.7, """eval_loss""": 0.6}, }, { """framework""": """pytorch""", """script""": """run_ddp.py""", """model_name_or_path""": """distilbert-base-cased""", """instance_type""": """ml.p3.16xlarge""", """results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.7, """eval_loss""": 0.6}, }, { """framework""": """tensorflow""", """script""": """run_tf_dist.py""", """model_name_or_path""": """distilbert-base-cased""", """instance_type""": """ml.p3.16xlarge""", """results""": {"""train_runtime""": 6_00, """eval_accuracy""": 0.6, """eval_loss""": 0.7}, }, ] ) class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self : Union[str, Any] ) -> Optional[Any]: """simple docstring""" if self.framework == "pytorch": subprocess.run( F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=UpperCamelCase__ , ) assert hasattr(self , """env""" ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Dict ) -> Any: """simple docstring""" __magic_name__ = F'''{self.env.base_job_name}-{instance_count}-{'ddp' if 'ddp' in self.script else 'smd'}''' # distributed data settings 
__magic_name__ = {"""smdistributed""": {"""dataparallel""": {"""enabled""": True}}} if self.script != """run_ddp.py""" else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=UpperCamelCase__ , instance_count=UpperCamelCase__ , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase__ , hyperparameters={**self.env.distributed_hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=UpperCamelCase__ , py_version="""py36""" , ) def _lowercase ( self : str , UpperCamelCase__ : List[str] ) -> Optional[int]: """simple docstring""" TrainingJobAnalytics(UpperCamelCase__ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' ) @parameterized.expand([(2,)] ) def _lowercase ( self : Any , UpperCamelCase__ : Union[str, Any] ) -> int: """simple docstring""" __magic_name__ = self.create_estimator(UpperCamelCase__ ) # run training estimator.fit() # result dataframe __magic_name__ = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis __magic_name__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] ) __magic_name__ = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping __magic_name__ = ( Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 99_9999 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy ) assert all(t <= self.results["""eval_loss"""] for t in eval_loss ) # dump tests result into json file to share in PR with open(F'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile: json.dump({"""train_time""": 
train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , UpperCamelCase__ )
76
import math import random def a__ ( A_, A_ = False ): '''simple docstring''' if deriv: return value * (1 - value) return 1 / (1 + math.exp(-value )) # Initial Value __lowerCAmelCase : Union[str, Any] = 0.02 def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = float(2 * (random.randint(1, 100 )) - 1 ) for _ in range(A_ ): # Forward propagation __magic_name__ = sigmoid_function(INITIAL_VALUE * weight ) # How much did we miss? __magic_name__ = (expected / 100) - layer_a # Error delta __magic_name__ = layer_1_error * sigmoid_function(A_, A_ ) # Update weight weight += INITIAL_VALUE * layer_1_delta return layer_a * 100 if __name__ == "__main__": import doctest doctest.testmod() __lowerCAmelCase : List[Any] = int(input('Expected value: ')) __lowerCAmelCase : Tuple = int(input('Number of propagations: ')) print(forward_propagation(expected, number_propagations))
76
1
__lowerCAmelCase : Any = tuple[float, float, float] __lowerCAmelCase : List[Any] = tuple[float, float, float] def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = end_pointa[0] - end_pointa[0] __magic_name__ = end_pointa[1] - end_pointa[1] __magic_name__ = end_pointa[2] - end_pointa[2] return (x, y, z) def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = ab[1] * ac[2] - ab[2] * ac[1] # *i __magic_name__ = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j __magic_name__ = ab[0] * ac[1] - ab[1] * ac[0] # *k return (x, y, z) def a__ ( A_, A_ ): '''simple docstring''' return tuple(round(A_, A_ ) for x in vector ) == (0, 0, 0) def a__ ( A_, A_, A_, A_ = 10 ): '''simple docstring''' __magic_name__ = create_vector(A_, A_ ) __magic_name__ = create_vector(A_, A_ ) return is_zero_vector(get_ad_vectors_cross(A_, A_ ), A_ )
76
import os import sys __lowerCAmelCase : Optional[Any] = os.path.join(os.path.dirname(__file__), 'src') sys.path.append(SRC_DIR) from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForQuestionAnswering, AutoModelForSequenceClassification, AutoTokenizer, add_start_docstrings, ) __lowerCAmelCase : Union[str, Any] = [ 'torch', 'numpy', 'tokenizers', 'filelock', 'requests', 'tqdm', 'regex', 'sentencepiece', 'sacremoses', 'importlib_metadata', 'huggingface_hub', ] @add_start_docstrings(AutoConfig.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoConfig.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoTokenizer.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoTokenizer.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModel.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModel.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForCausalLM.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForCausalLM.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForMaskedLM.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForMaskedLM.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForSequenceClassification.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForSequenceClassification.from_pretrained(*A_, **A_ ) @add_start_docstrings(AutoModelForQuestionAnswering.__doc__ ) def a__ ( *A_, **A_ ): '''simple docstring''' return AutoModelForQuestionAnswering.from_pretrained(*A_, **A_ )
76
1
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging __lowerCAmelCase : Tuple = logging.get_logger(__name__) __lowerCAmelCase : int = { 'google/vit-base-patch16-224': 'https://huggingface.co/vit-base-patch16-224/resolve/main/config.json', # See all ViT models at https://huggingface.co/models?filter=vit } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """vit""" def __init__( self : Any , UpperCamelCase__ : Optional[Any]=768 , UpperCamelCase__ : str=12 , UpperCamelCase__ : List[str]=12 , UpperCamelCase__ : Tuple=3072 , UpperCamelCase__ : Dict="gelu" , UpperCamelCase__ : Any=0.0 , UpperCamelCase__ : List[Any]=0.0 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : List[str]=1E-12 , UpperCamelCase__ : Union[str, Any]=224 , UpperCamelCase__ : Dict=16 , UpperCamelCase__ : Dict=3 , UpperCamelCase__ : str=True , UpperCamelCase__ : List[Any]=16 , **UpperCamelCase__ : int , ) -> int: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = intermediate_size __magic_name__ = hidden_act __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = initializer_range __magic_name__ = layer_norm_eps __magic_name__ = image_size __magic_name__ = patch_size __magic_name__ = num_channels __magic_name__ = qkv_bias __magic_name__ = encoder_stride class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = version.parse("""1.11""" ) @property def _lowercase ( self : Dict ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ] ) @property def _lowercase ( self : Optional[int] ) -> float: """simple docstring""" return 1E-4
76
from typing import Dict from .base import GenericTensor, Pipeline class UpperCAmelCase_ ( _A ): '''simple docstring''' def _lowercase ( self : List[Any] , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Union[str, Any]=None , UpperCamelCase__ : Any=None , **UpperCamelCase__ : Dict ) -> str: """simple docstring""" if tokenize_kwargs is None: __magic_name__ = {} if truncation is not None: if "truncation" in tokenize_kwargs: raise ValueError( """truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)""" ) __magic_name__ = truncation __magic_name__ = tokenize_kwargs __magic_name__ = {} if return_tensors is not None: __magic_name__ = return_tensors return preprocess_params, {}, postprocess_params def _lowercase ( self : int , UpperCamelCase__ : int , **UpperCamelCase__ : int ) -> Dict[str, GenericTensor]: """simple docstring""" __magic_name__ = self.framework __magic_name__ = self.tokenizer(UpperCamelCase__ , return_tensors=UpperCamelCase__ , **UpperCamelCase__ ) return model_inputs def _lowercase ( self : str , UpperCamelCase__ : Dict ) -> str: """simple docstring""" __magic_name__ = self.model(**UpperCamelCase__ ) return model_outputs def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str]=False ) -> List[str]: """simple docstring""" if return_tensors: return model_outputs[0] if self.framework == "pt": return model_outputs[0].tolist() elif self.framework == "tf": return model_outputs[0].numpy().tolist() def __call__( self : List[str] , *UpperCamelCase__ : List[Any] , **UpperCamelCase__ : List[Any] ) -> Dict: """simple docstring""" return super().__call__(*UpperCamelCase__ , **UpperCamelCase__ )
76
1
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import BertTokenizer, BertTokenizerFast from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor @require_vision class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self : Tuple ) -> Union[str, Any]: """simple docstring""" __magic_name__ = tempfile.mkdtemp() __magic_name__ = [ """[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """的""", """价""", """格""", """是""", """15""", """便""", """alex""", """##andra""", """,""", """。""", """-""", """t""", """shirt""", ] __magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) __magic_name__ = { """do_resize""": True, """size""": {"""height""": 224, """width""": 224}, """do_center_crop""": True, """crop_size""": {"""height""": 18, """width""": 18}, """do_normalize""": True, """image_mean""": [0.48145466, 0.4578275, 0.40821073], """image_std""": [0.26862954, 0.26130258, 0.27577711], """do_convert_rgb""": True, } __magic_name__ = os.path.join(self.tmpdirname , UpperCamelCase__ ) with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp: json.dump(UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : Tuple , **UpperCamelCase__ : Optional[int] ) -> Dict: """simple docstring""" return BertTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self : Tuple , **UpperCamelCase__ : List[str] ) -> Any: """simple docstring""" return BertTokenizerFast.from_pretrained(self.tmpdirname , 
**UpperCamelCase__ ) def _lowercase ( self : List[Any] , **UpperCamelCase__ : Dict ) -> Tuple: """simple docstring""" return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self : str ) -> Optional[int]: """simple docstring""" shutil.rmtree(self.tmpdirname ) def _lowercase ( self : int ) -> Any: """simple docstring""" __magic_name__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __magic_name__ = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowercase ( self : Dict ) -> Tuple: """simple docstring""" __magic_name__ = self.get_tokenizer() __magic_name__ = self.get_rust_tokenizer() __magic_name__ = self.get_image_processor() __magic_name__ = ChineseCLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) processor_slow.save_pretrained(self.tmpdirname ) __magic_name__ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase__ ) __magic_name__ = ChineseCLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) processor_fast.save_pretrained(self.tmpdirname ) __magic_name__ = ChineseCLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase__ ) self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , UpperCamelCase__ ) self.assertIsInstance(processor_fast.image_processor , UpperCamelCase__ ) def _lowercase ( self : str ) -> 
Optional[Any]: """simple docstring""" __magic_name__ = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __magic_name__ = self.get_tokenizer(cls_token="""(CLS)""" , sep_token="""(SEP)""" ) __magic_name__ = self.get_image_processor(do_normalize=UpperCamelCase__ ) __magic_name__ = ChineseCLIPProcessor.from_pretrained( self.tmpdirname , cls_token="""(CLS)""" , sep_token="""(SEP)""" , do_normalize=UpperCamelCase__ ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCamelCase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase__ ) def _lowercase ( self : Optional[Any] ) -> Any: """simple docstring""" __magic_name__ = self.get_image_processor() __magic_name__ = self.get_tokenizer() __magic_name__ = ChineseCLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) __magic_name__ = self.prepare_image_inputs() __magic_name__ = image_processor(UpperCamelCase__ , return_tensors="""np""" ) __magic_name__ = processor(images=UpperCamelCase__ , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _lowercase ( self : Union[str, Any] ) -> List[str]: """simple docstring""" __magic_name__ = self.get_image_processor() __magic_name__ = self.get_tokenizer() __magic_name__ = ChineseCLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) __magic_name__ = """Alexandra,T-shirt的价格是15便士。""" __magic_name__ = processor(text=UpperCamelCase__ ) __magic_name__ = tokenizer(UpperCamelCase__ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _lowercase ( self : List[Any] ) -> List[Any]: """simple 
docstring""" __magic_name__ = self.get_image_processor() __magic_name__ = self.get_tokenizer() __magic_name__ = ChineseCLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) __magic_name__ = """Alexandra,T-shirt的价格是15便士。""" __magic_name__ = self.prepare_image_inputs() __magic_name__ = processor(text=UpperCamelCase__ , images=UpperCamelCase__ ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase__ ): processor() def _lowercase ( self : int ) -> List[str]: """simple docstring""" __magic_name__ = self.get_image_processor() __magic_name__ = self.get_tokenizer() __magic_name__ = ChineseCLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) __magic_name__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __magic_name__ = processor.batch_decode(UpperCamelCase__ ) __magic_name__ = tokenizer.batch_decode(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : str ) -> Any: """simple docstring""" __magic_name__ = self.get_image_processor() __magic_name__ = self.get_tokenizer() __magic_name__ = ChineseCLIPProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) __magic_name__ = """Alexandra,T-shirt的价格是15便士。""" __magic_name__ = self.prepare_image_inputs() __magic_name__ = processor(text=UpperCamelCase__ , images=UpperCamelCase__ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
76
import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel __lowerCAmelCase : str = { 'gwf-440k': { 'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt', 'sample_rate': 48000, 'sample_size': 65536, }, 'jmann-small-190k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt', 'sample_rate': 48000, 'sample_size': 65536, }, 'jmann-large-580k': { 'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt', 'sample_rate': 48000, 'sample_size': 131072, }, 'maestro-uncond-150k': { 'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, 'unlocked-uncond-250k': { 'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, 'honk-140k': { 'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt', 'sample_rate': 16000, 'sample_size': 65536, }, } def a__ ( A_, A_ ): '''simple docstring''' return torch.atana(A_, A_ ) / math.pi * 2 def a__ ( A_ ): '''simple docstring''' __magic_name__ = torch.sin(t * math.pi / 2 ) ** 2 __magic_name__ = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(A_, A_ ) class UpperCAmelCase_ ( _A ): '''simple docstring''' pass class UpperCAmelCase_ ( nn.Module ): '''simple docstring''' def __init__( self : Tuple , UpperCamelCase__ : str ) -> Optional[Any]: """simple docstring""" super().__init__() __magic_name__ = DiffusionAttnUnetaD(UpperCamelCase__ , n_attn_layers=4 ) __magic_name__ = deepcopy(self.diffusion ) __magic_name__ = torch.quasirandom.SobolEngine(1 , scramble=UpperCamelCase__ ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = MODELS_MAP[model_name]["""url"""] os.system(f'''wget {url} ./''' ) return f'''./{model_name}.ckpt''' __lowerCAmelCase : Optional[int] = { 
'1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', } __lowerCAmelCase : Optional[Any] = { '8': 'resnets.0', '9': 'attentions.0', '10': 'resnets.1', '11': 'attentions.1', '12': 'resnets.2', '13': 'attentions.2', } __lowerCAmelCase : Union[str, Any] = { '1': 'resnets.0', '2': 'attentions.0', '3': 'resnets.1', '4': 'attentions.1', '5': 'resnets.2', '6': 'attentions.2', '8': 'resnets.3', '9': 'attentions.3', '10': 'resnets.4', '11': 'attentions.4', '12': 'resnets.5', '13': 'attentions.5', } __lowerCAmelCase : int = { '0': 'resnets.0', '1': 'resnets.1', '2': 'resnets.2', '4': 'resnets.0', '5': 'resnets.1', '6': 'resnets.2', } __lowerCAmelCase : List[str] = { 'skip': 'conv_skip', 'main.0': 'conv_1', 'main.1': 'group_norm_1', 'main.3': 'conv_2', 'main.4': 'group_norm_2', } __lowerCAmelCase : int = { 'norm': 'group_norm', 'qkv_proj': ['query', 'key', 'value'], 'out_proj': ['proj_attn'], } def a__ ( A_ ): '''simple docstring''' if name.startswith("""skip""" ): return name.replace("""skip""", RES_CONV_MAP["""skip"""] ) # name has to be of format main.{digit} if not name.startswith("""main.""" ): raise ValueError(f'''ResConvBlock error with {name}''' ) return name.replace(name[:6], RES_CONV_MAP[name[:6]] ) def a__ ( A_ ): '''simple docstring''' for key, value in ATTN_MAP.items(): if name.startswith(A_ ) and not isinstance(A_, A_ ): return name.replace(A_, A_ ) elif name.startswith(A_ ): return [name.replace(A_, A_ ) for v in value] raise ValueError(f'''Attn error with {name}''' ) def a__ ( A_, A_=13 ): '''simple docstring''' __magic_name__ = input_string if string.split(""".""" )[0] == "timestep_embed": return string.replace("""timestep_embed""", """time_proj""" ) __magic_name__ = 0 if string.startswith("""net.3.""" ): depth += 1 __magic_name__ = string[6:] elif string.startswith("""net.""" ): __magic_name__ = string[4:] while string.startswith("""main.7.""" ): depth += 1 __magic_name__ = string[7:] if 
string.startswith("""main.""" ): __magic_name__ = string[5:] # mid block if string[:2].isdigit(): __magic_name__ = string[:2] __magic_name__ = string[2:] else: __magic_name__ = string[0] __magic_name__ = string[1:] if depth == max_depth: __magic_name__ = MID_NUM_TO_LAYER[layer_num] __magic_name__ = """mid_block""" elif depth > 0 and int(A_ ) < 7: __magic_name__ = DOWN_NUM_TO_LAYER[layer_num] __magic_name__ = f'''down_blocks.{depth}''' elif depth > 0 and int(A_ ) > 7: __magic_name__ = UP_NUM_TO_LAYER[layer_num] __magic_name__ = f'''up_blocks.{max_depth - depth - 1}''' elif depth == 0: __magic_name__ = DEPTH_0_TO_LAYER[layer_num] __magic_name__ = f'''up_blocks.{max_depth - 1}''' if int(A_ ) > 3 else """down_blocks.0""" if not string_left.startswith(""".""" ): raise ValueError(f'''Naming error with {input_string} and string_left: {string_left}.''' ) __magic_name__ = string_left[1:] if "resnets" in new_layer: __magic_name__ = convert_resconv_naming(A_ ) elif "attentions" in new_layer: __magic_name__ = convert_attn_naming(A_ ) __magic_name__ = new_string_left if not isinstance(A_, A_ ): __magic_name__ = prefix + """.""" + new_layer + """.""" + string_left else: __magic_name__ = [prefix + """.""" + new_layer + """.""" + s for s in string_left] return new_string def a__ ( A_ ): '''simple docstring''' __magic_name__ = {} for k, v in state_dict.items(): if k.endswith("""kernel""" ): # up- and downsample layers, don't have trainable weights continue __magic_name__ = rename(A_ ) # check if we need to transform from Conv => Linear for attention if isinstance(A_, A_ ): __magic_name__ = transform_conv_attns(A_, A_, A_ ) else: __magic_name__ = v return new_state_dict def a__ ( A_, A_, A_ ): '''simple docstring''' if len(A_ ) == 1: if len(v.shape ) == 3: # weight __magic_name__ = v[:, :, 0] else: # bias __magic_name__ = v else: # qkv matrices __magic_name__ = v.shape[0] __magic_name__ = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: __magic_name__ = v[i * 
single_shape : (i + 1) * single_shape, :, 0] else: __magic_name__ = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def a__ ( A_ ): '''simple docstring''' __magic_name__ = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) __magic_name__ = args.model_path.split("""/""" )[-1].split(""".""" )[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), f'''Make sure to provide one of the official model names {MODELS_MAP.keys()}''' __magic_name__ = download(A_ ) __magic_name__ = MODELS_MAP[model_name]["""sample_rate"""] __magic_name__ = MODELS_MAP[model_name]["""sample_size"""] __magic_name__ = Object() __magic_name__ = sample_size __magic_name__ = sample_rate __magic_name__ = 0 __magic_name__ = UNetaDModel(sample_size=A_, sample_rate=A_ ) __magic_name__ = diffusers_model.state_dict() __magic_name__ = DiffusionUncond(A_ ) orig_model.load_state_dict(torch.load(args.model_path, map_location=A_ )["""state_dict"""] ) __magic_name__ = orig_model.diffusion_ema.eval() __magic_name__ = orig_model.state_dict() __magic_name__ = rename_orig_weights(A_ ) __magic_name__ = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) __magic_name__ = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(A_ ) == 0, f'''Problem with {renamed_minus_diffusers}''' assert all(k.endswith("""kernel""" ) for k in list(A_ ) ), f'''Problem with {diffusers_minus_renamed}''' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), f'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. 
{value.shape}''' if key == "time_proj.weight": __magic_name__ = value.squeeze() __magic_name__ = value diffusers_model.load_state_dict(A_ ) __magic_name__ = 100 __magic_name__ = 33 __magic_name__ = IPNDMScheduler(num_train_timesteps=A_ ) __magic_name__ = torch.manual_seed(A_ ) __magic_name__ = torch.randn([1, 2, config.sample_size], generator=A_ ).to(A_ ) __magic_name__ = torch.linspace(1, 0, steps + 1, device=A_ )[:-1] __magic_name__ = get_crash_schedule(A_ ) __magic_name__ = DanceDiffusionPipeline(unet=A_, scheduler=A_ ) __magic_name__ = torch.manual_seed(33 ) __magic_name__ = pipe(num_inference_steps=A_, generator=A_ ).audios __magic_name__ = sampling.iplms_sample(A_, A_, A_, {} ) __magic_name__ = generated.clamp(-1, 1 ) __magic_name__ = (generated - audio).abs().sum() __magic_name__ = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("""Diff sum""", A_ ) print("""Diff max""", A_ ) assert diff_max < 1e-3, f'''Diff max: {diff_max} is too much :-/''' print(f'''Conversion for {model_name} successful!''' ) if __name__ == "__main__": __lowerCAmelCase : Optional[int] = argparse.ArgumentParser() parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.') parser.add_argument( '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.' ) parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.') __lowerCAmelCase : Union[str, Any] = parser.parse_args() main(args)
76
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) __lowerCAmelCase : str = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Dict = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : int = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys __lowerCAmelCase : Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Tuple = logging.get_logger(__name__) __lowerCAmelCase : Tuple = { 'SCUT-DLVCLab/lilt-roberta-en-base': ( 'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json' ), } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """lilt""" def __init__( self : Dict , UpperCamelCase__ : List[str]=3_0522 , UpperCamelCase__ : Optional[Any]=768 , UpperCamelCase__ : Dict=12 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : Dict=3072 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Union[str, Any]=512 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Dict=0.02 , UpperCamelCase__ : Any=1E-12 , UpperCamelCase__ : Optional[int]=0 , UpperCamelCase__ : str="absolute" , UpperCamelCase__ : Any=None , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Tuple=1024 , **UpperCamelCase__ : Optional[int] , ) -> Dict: """simple docstring""" super().__init__(pad_token_id=UpperCamelCase__ , **UpperCamelCase__ ) __magic_name__ = vocab_size __magic_name__ = hidden_size __magic_name__ = num_hidden_layers __magic_name__ = num_attention_heads __magic_name__ = hidden_act __magic_name__ = intermediate_size __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = max_position_embeddings __magic_name__ = type_vocab_size __magic_name__ = initializer_range __magic_name__ = layer_norm_eps __magic_name__ = position_embedding_type __magic_name__ = classifier_dropout __magic_name__ = channel_shrink_ratio __magic_name__ = max_ad_position_embeddings
76
1
import argparse import re from flax.traverse_util import flatten_dict, unflatten_dict from tax import checkpoints from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model from transformers.utils import logging logging.set_verbosity_info() # should not include what is already done by the `from_pt` argument __lowerCAmelCase : Union[str, Any] = { '/attention/': '/0/SelfAttention/', '/self_attention/': '/0/SelfAttention/', '/encoder_decoder_attention/': '/1/EncDecAttention/', 'value': 'v', 'query': 'q', 'key': 'k', 'out': 'o', 'pre_self_attention_layer_norm': '0/layer_norm', 'pre_cross_attention_layer_norm': '1/layer_norm', 'pre_attention_layer_norm': '0/layer_norm', # previously 1, but seems wrong 'token_embedder': 'shared', 'encoder_norm': 'final_layer_norm', 'decoder_norm': 'final_layer_norm', 'relpos_bias/rel_embedding': 'block/0/layer/0/SelfAttention/relative_attention_bias/weight', 'router/router_weights/w/': 'router/classifier/', 'roer/roer_weights/w/': 'router/classifier/', 'logits_dense': 'lm_head', } def a__ ( A_ ): '''simple docstring''' __magic_name__ = list(s_dict.keys() ) for key in keys: __magic_name__ = R""".*/layers_(\d+)""" __magic_name__ = key if re.match(A_, A_ ): __magic_name__ = re.sub(R"""layers_(\d+)""", R"""block/\1/layer""", A_ ) __magic_name__ = R"""(encoder|decoder)\/""" if re.match(A_, A_ ): __magic_name__ = re.match(A_, A_ ).groups() if groups[0] == "encoder": __magic_name__ = re.sub(R"""/mlp/""", R"""/1/mlp/""", A_ ) __magic_name__ = re.sub(R"""/pre_mlp_layer_norm/""", R"""/1/layer_norm/""", A_ ) elif groups[0] == "decoder": __magic_name__ = re.sub(R"""/mlp/""", R"""/2/mlp/""", A_ ) __magic_name__ = re.sub(R"""/pre_mlp_layer_norm/""", R"""/2/layer_norm/""", A_ ) # 2. 
Convert other classic mappings for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items(): if old_key in new_key: __magic_name__ = new_key.replace(A_, A_ ) print(f'''{key} -> {new_key}''' ) __magic_name__ = s_dict.pop(A_ ) if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: __magic_name__ = s_dict[ """encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight""" ].T if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict: __magic_name__ = s_dict[ """decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight""" ].T # 3. Take extra care of the EXPERTS layer for key in list(s_dict.keys() ): if "expert" in key: __magic_name__ = s_dict[key].shape[0] __magic_name__ = s_dict[key] for idx in range(A_ ): __magic_name__ = expert_weihts[idx] print(f'''{key} -> {key.replace('expert/', 'nested fstring' )}''' ) s_dict.pop(A_ ) return s_dict __lowerCAmelCase : Dict = { 'NUM_ENCODER_LAYERS': 'num_layers', 'NUM_DECODER_LAYERS': 'num_decoder_layers', 'NUM_HEADS': 'num_heads', 'HEAD_DIM': 'd_kv', 'EMBED_DIM': 'd_model', 'MLP_DIM': 'd_ff', 'NUM_SELECTED_EXPERTS': 'num_selected_experts', 'NUM_ENCODER_SPARSE_LAYERS': 'num_sparse_encoder_layers', 'NUM_DECODER_SPARSE_LAYERS': 'num_sparse_decoder_layers', 'dense.MlpBlock.activations': 'feed_forward_proj', } def a__ ( A_, A_ ): '''simple docstring''' import regex as re with open(A_, """r""" ) as f: __magic_name__ = f.read() __magic_name__ = re.findall(R"""(.*) = ([0-9.]*)""", A_ ) __magic_name__ = {} for param, value in regex_match: if param in GIN_TO_CONFIG_MAPPING and value != "": __magic_name__ = float(A_ ) if """.""" in value else int(A_ ) __magic_name__ = re.findall(R"""(.*activations) = \(\'(.*)\',\)""", A_ )[0] __magic_name__ = str(activation[1] ) __magic_name__ = num_experts __magic_name__ = SwitchTransformersConfig(**A_ ) return config def a__ ( A_, A_, A_=None, A_="./", A_=8 ): '''simple docstring''' print(f'''Loading flax weights from : 
{flax_checkpoint_path}''' ) __magic_name__ = checkpoints.load_tax_checkpoint(A_ ) if gin_file is not None: __magic_name__ = convert_gin_to_config(A_, A_ ) else: __magic_name__ = SwitchTransformersConfig.from_pretrained(A_ ) __magic_name__ = SwitchTransformersForConditionalGeneration(A_ ) __magic_name__ = flax_params["""target"""] __magic_name__ = flatten_dict(A_, sep="""/""" ) __magic_name__ = rename_keys(A_ ) __magic_name__ = unflatten_dict(A_, sep="""/""" ) # Load the flax params in the PT model load_flax_weights_in_pytorch_model(A_, A_ ) print(f'''Save PyTorch model to {pytorch_dump_path}''' ) pt_model.save_pretrained(A_ ) if __name__ == "__main__": __lowerCAmelCase : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '--switch_t5x_checkpoint_path', default=None, type=str, required=True, help=( 'The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the' ' model architecture. If not provided, a `gin_file` has to be provided.' ), ) parser.add_argument( '--gin_file', default=None, type=str, required=False, help='Path to the gin config file. If not provided, a `config_file` has to be passed ', ) parser.add_argument( '--config_name', default=None, type=str, required=False, help='Config name of SwitchTransformers model.' ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output pytorch model.' ) parser.add_argument('--num_experts', default=8, type=int, required=False, help='Number of experts') __lowerCAmelCase : Dict = parser.parse_args() convert_flax_checkpoint_to_pytorch( args.switch_tax_checkpoint_path, args.config_name, args.gin_file, args.pytorch_dump_folder_path, args.num_experts, )
76
import json import os import tempfile from transformers.testing_utils import check_json_file_has_correct_format class UpperCAmelCase_ : '''simple docstring''' a__ = None def _lowercase ( self : Optional[int] ) -> str: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) __magic_name__ = json.loads(feat_extract.to_json_string() ) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key] , UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __magic_name__ = os.path.join(UpperCamelCase__ , """feat_extract.json""" ) feat_extract_first.to_json_file(UpperCamelCase__ ) __magic_name__ = self.feature_extraction_class.from_json_file(UpperCamelCase__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def _lowercase ( self : str ) -> str: """simple docstring""" __magic_name__ = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: __magic_name__ = feat_extract_first.save_pretrained(UpperCamelCase__ )[0] check_json_file_has_correct_format(UpperCamelCase__ ) __magic_name__ = self.feature_extraction_class.from_pretrained(UpperCamelCase__ ) self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() ) def _lowercase ( self : Optional[int] ) -> Tuple: """simple docstring""" __magic_name__ = self.feature_extraction_class() self.assertIsNotNone(UpperCamelCase__ )
76
1
from math import ceil from typing import List, Optional, Union import numpy as np from ...audio_utils import mel_filter_bank, spectrogram, window_function from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor from ...utils import TensorType, logging __lowerCAmelCase : List[str] = logging.get_logger(__name__) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = ["""audio_values""", """audio_mask"""] def __init__( self : List[str] , UpperCamelCase__ : Union[str, Any]=2048 , UpperCamelCase__ : Optional[Any]=1 , UpperCamelCase__ : Tuple=[16, 16] , UpperCamelCase__ : List[str]=128 , UpperCamelCase__ : Union[str, Any]=4_4100 , UpperCamelCase__ : List[str]=86 , UpperCamelCase__ : str=2048 , UpperCamelCase__ : Optional[Any]=0.0 , **UpperCamelCase__ : List[Any] , ) -> List[str]: """simple docstring""" super().__init__( feature_size=UpperCamelCase__ , sampling_rate=UpperCamelCase__ , padding_value=UpperCamelCase__ , **UpperCamelCase__ , ) __magic_name__ = spectrogram_length __magic_name__ = num_channels __magic_name__ = patch_size __magic_name__ = feature_size // self.patch_size[1] __magic_name__ = n_fft __magic_name__ = sampling_rate // hop_length_to_sampling_rate __magic_name__ = sampling_rate __magic_name__ = padding_value __magic_name__ = mel_filter_bank( num_frequency_bins=1 + n_fft // 2 , num_mel_filters=UpperCamelCase__ , min_frequency=0.0 , max_frequency=22050.0 , sampling_rate=UpperCamelCase__ , norm="""slaney""" , mel_scale="""slaney""" , ).T def _lowercase ( self : List[str] , UpperCamelCase__ : np.array ) -> np.ndarray: """simple docstring""" __magic_name__ = spectrogram( UpperCamelCase__ , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , ) __magic_name__ = log_spec[:, :-1] __magic_name__ = log_spec - 20.0 __magic_name__ = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0 return log_spec def __call__( 
self : Union[str, Any] , UpperCamelCase__ : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[bool] = True , UpperCamelCase__ : Optional[int] = None , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , **UpperCamelCase__ : Dict , ) -> BatchFeature: """simple docstring""" if sampling_rate is not None: if sampling_rate != self.sampling_rate: raise ValueError( """This feature extractor is set to support sampling rate""" F''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled''' F''' with {self.sampling_rate} and not {sampling_rate}.''' ) else: logger.warning( """It is strongly recommended to pass the `sampling_rate` argument to this function. """ """Failing to do so can result in silent errors that might be hard to debug.""" ) __magic_name__ = isinstance(UpperCamelCase__ , np.ndarray ) and len(raw_speech.shape ) > 1 if is_batched_numpy and len(raw_speech.shape ) > 2: raise ValueError(F'''Only mono-channel audio is supported for input to {self}''' ) __magic_name__ = is_batched_numpy or ( isinstance(UpperCamelCase__ , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) )) ) if is_batched: __magic_name__ = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech] elif not is_batched and not isinstance(UpperCamelCase__ , np.ndarray ): __magic_name__ = np.asarray(UpperCamelCase__ , dtype=np.floataa ) elif isinstance(UpperCamelCase__ , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ): __magic_name__ = raw_speech.astype(np.floataa ) # always return batch if not is_batched: __magic_name__ = [np.asarray([raw_speech] ).T] # Convert audio signals to log mel spectrograms, truncate by time axis __magic_name__ = [ self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech ] if isinstance(audio_features[0] , UpperCamelCase__ ): 
__magic_name__ = [np.asarray(UpperCamelCase__ , dtype=np.floataa ) for feature in audio_features] # Create audio attention mask __magic_name__ = max( [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch if return_attention_mask: __magic_name__ = [ (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1] + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0] for feature in audio_features ] __magic_name__ = np.array(UpperCamelCase__ ).astype(np.floataa ) # convert into correct format for padding __magic_name__ = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch __magic_name__ = np.ones([len(UpperCamelCase__ ), 1, max_time_len, self.feature_size] ).astype(np.floataa ) __magic_name__ = padded_audio_features * self.padding_value for i in range(len(UpperCamelCase__ ) ): __magic_name__ = audio_features[i] __magic_name__ = feature # return as BatchFeature if return_attention_mask: __magic_name__ = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask} else: __magic_name__ = {"""audio_values""": padded_audio_features} __magic_name__ = BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ ) return encoded_inputs
76
from ..utils import DummyObject, requires_backends


class UpperCAmelCase_(metaclass=_A):
    """Placeholder object that raises a helpful error unless the optional
    ``note_seq`` backend is installed.

    Fix: every method in the mangled original declared ``*UpperCamelCase__``
    together with ``**UpperCamelCase__`` — a duplicate-argument SyntaxError.
    The variadics are renamed to the conventional ``*args, **kwargs``
    (not caller-visible, so the interface is unchanged).
    """

    # Backends this dummy stands in for.
    a__ = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def _lowercase(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def _lowercase(cls, *args, **kwargs):  # noqa: F811 — shadows the previous def; kept as found
        requires_backends(cls, ["note_seq"])
76
1
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __lowerCAmelCase : Dict = logging.get_logger(__name__) if is_vision_available(): import PIL class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = ["""pixel_values"""] def __init__( self : Optional[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = True , **UpperCamelCase__ : int , ) -> None: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = size if size is not None else {"""shortest_edge""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ , param_name="""crop_size""" ) __magic_name__ = do_resize __magic_name__ = size __magic_name__ = resample __magic_name__ = do_center_crop __magic_name__ = crop_size __magic_name__ = do_rescale __magic_name__ = rescale_factor __magic_name__ = do_normalize __magic_name__ = image_mean if image_mean is not None else 
OPENAI_CLIP_MEAN __magic_name__ = image_std if image_std is not None else OPENAI_CLIP_STD __magic_name__ = do_convert_rgb def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __magic_name__ = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__ ) return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] , ) -> Optional[int]: """simple docstring""" return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict , ) -> np.ndarray: """simple docstring""" return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : List[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ) -> PIL.Image.Image: """simple docstring""" __magic_name__ = do_resize if do_resize is not None else self.do_resize __magic_name__ = size if size is not None else self.size __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""size""" , default_to_square=UpperCamelCase__ ) __magic_name__ = resample if resample is not None else self.resample 
__magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__ = crop_size if crop_size is not None else self.crop_size __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" , default_to_square=UpperCamelCase__ ) __magic_name__ = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ = image_mean if image_mean is not None else self.image_mean __magic_name__ = image_std if image_std is not None else self.image_std __magic_name__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __magic_name__ = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __magic_name__ = [convert_to_rgb(UpperCamelCase__ ) for image in images] # All transformations expect numpy arrays. 
__magic_name__ = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_resize: __magic_name__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images] if do_center_crop: __magic_name__ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images] if do_rescale: __magic_name__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images] if do_normalize: __magic_name__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images] __magic_name__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images] __magic_name__ = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
76
def a__ ( A_ ):
    """Reverse every word longer than four characters in a sentence.

    Words of length <= 4 are kept as-is; word order is preserved and
    whitespace is normalized to single spaces by ``str.split``.

    Fixes from the original: the body referenced an undefined name
    (``sentence``) instead of the parameter, and decided reversal from the
    length of the whole argument instead of each word.

    >>> a__('Hey wollef sroirraw')
    'Hey fellow warriors'
    """
    return " ".join(
        word[::-1] if len(word) > 4 else word for word in A_.split()
    )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Fix: the original called an undefined ``reverse_long_words``.
    print(a__("Hey wollef sroirraw"))
76
1
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def _lowercase ( self : Optional[int] ) -> List[Any]: """simple docstring""" __magic_name__ = tempfile.mkdtemp() # fmt: off __magic_name__ = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on __magic_name__ = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) ) __magic_name__ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] __magic_name__ = {"""unk_token""": """<unk>"""} __magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(UpperCamelCase__ ) + """\n""" ) with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp: fp.write("""\n""".join(UpperCamelCase__ ) ) __magic_name__ = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.48145466, 0.4578275, 0.40821073], """image_std""": [0.26862954, 0.26130258, 0.27577711], } __magic_name__ = os.path.join(self.tmpdirname , UpperCamelCase__ ) with open(self.image_processor_file , """w""" , 
encoding="""utf-8""" ) as fp: json.dump(UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : int , **UpperCamelCase__ : Union[str, Any] ) -> int: """simple docstring""" return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="""!""" , **UpperCamelCase__ ) def _lowercase ( self : Any , **UpperCamelCase__ : int ) -> Any: """simple docstring""" return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="""!""" , **UpperCamelCase__ ) def _lowercase ( self : Any , **UpperCamelCase__ : str ) -> Any: """simple docstring""" return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self : str ) -> Dict: """simple docstring""" shutil.rmtree(self.tmpdirname ) def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" __magic_name__ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] __magic_name__ = [Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def _lowercase ( self : List[Any] ) -> Optional[int]: """simple docstring""" __magic_name__ = self.get_tokenizer() __magic_name__ = self.get_rust_tokenizer() __magic_name__ = self.get_image_processor() __magic_name__ = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) processor_slow.save_pretrained(self.tmpdirname ) __magic_name__ = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase__ ) __magic_name__ = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) processor_fast.save_pretrained(self.tmpdirname ) __magic_name__ = OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , UpperCamelCase__ ) 
self.assertIsInstance(processor_fast.tokenizer , UpperCamelCase__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , UpperCamelCase__ ) self.assertIsInstance(processor_fast.image_processor , UpperCamelCase__ ) def _lowercase ( self : Tuple ) -> str: """simple docstring""" __magic_name__ = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) __magic_name__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) __magic_name__ = self.get_image_processor(do_normalize=UpperCamelCase__ ) __magic_name__ = OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase__ ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCamelCase__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCamelCase__ ) def _lowercase ( self : Tuple ) -> Union[str, Any]: """simple docstring""" __magic_name__ = self.get_image_processor() __magic_name__ = self.get_tokenizer() __magic_name__ = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) __magic_name__ = self.prepare_image_inputs() __magic_name__ = image_processor(UpperCamelCase__ , return_tensors="""np""" ) __magic_name__ = processor(images=UpperCamelCase__ , return_tensors="""np""" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _lowercase ( self : Tuple ) -> Tuple: """simple docstring""" __magic_name__ = self.get_image_processor() __magic_name__ = 
self.get_tokenizer() __magic_name__ = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) __magic_name__ = """lower newer""" __magic_name__ = processor(text=UpperCamelCase__ , return_tensors="""np""" ) __magic_name__ = tokenizer(UpperCamelCase__ , return_tensors="""np""" ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() ) def _lowercase ( self : Optional[int] ) -> List[Any]: """simple docstring""" __magic_name__ = self.get_image_processor() __magic_name__ = self.get_tokenizer() __magic_name__ = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) __magic_name__ = """lower newer""" __magic_name__ = self.prepare_image_inputs() __magic_name__ = processor(text=UpperCamelCase__ , images=UpperCamelCase__ ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase__ ): processor() def _lowercase ( self : Optional[int] ) -> Any: """simple docstring""" __magic_name__ = """google/owlvit-base-patch32""" __magic_name__ = OwlViTProcessor.from_pretrained(UpperCamelCase__ ) __magic_name__ = ["""cat""", """nasa badge"""] __magic_name__ = processor(text=UpperCamelCase__ ) __magic_name__ = 16 self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] ) self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase__ ): processor() def _lowercase ( self : Tuple ) -> Optional[Any]: """simple docstring""" __magic_name__ = """google/owlvit-base-patch32""" __magic_name__ = OwlViTProcessor.from_pretrained(UpperCamelCase__ ) __magic_name__ = [["""cat""", """nasa badge"""], ["""person"""]] __magic_name__ = processor(text=UpperCamelCase__ ) __magic_name__ = 16 __magic_name__ = len(UpperCamelCase__ ) __magic_name__ = 
max([len(UpperCamelCase__ ) for texts in input_texts] ) self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] ) self.assertEqual(inputs["""input_ids"""].shape , (batch_size * num_max_text_queries, seq_length) ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase__ ): processor() def _lowercase ( self : Tuple ) -> Union[str, Any]: """simple docstring""" __magic_name__ = """google/owlvit-base-patch32""" __magic_name__ = OwlViTProcessor.from_pretrained(UpperCamelCase__ ) __magic_name__ = ["""cat""", """nasa badge"""] __magic_name__ = processor(text=UpperCamelCase__ ) __magic_name__ = 16 __magic_name__ = inputs["""input_ids"""] __magic_name__ = [ [4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask"""] ) self.assertEqual(inputs["""input_ids"""].shape , (2, seq_length) ) self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] ) self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] ) def _lowercase ( self : Any ) -> Dict: """simple docstring""" __magic_name__ = self.get_image_processor() __magic_name__ = self.get_tokenizer() __magic_name__ = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) __magic_name__ = self.prepare_image_inputs() __magic_name__ = self.prepare_image_inputs() __magic_name__ = processor(images=UpperCamelCase__ , query_images=UpperCamelCase__ ) self.assertListEqual(list(inputs.keys() ) , ["""query_pixel_values""", """pixel_values"""] ) # test if it raises when no input is passed with pytest.raises(UpperCamelCase__ ): processor() def _lowercase ( self : List[str] ) -> Union[str, Any]: """simple docstring""" __magic_name__ = self.get_image_processor() __magic_name__ = self.get_tokenizer() __magic_name__ = OwlViTProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ ) __magic_name__ = 
[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] __magic_name__ = processor.batch_decode(UpperCamelCase__ ) __magic_name__ = tokenizer.batch_decode(UpperCamelCase__ ) self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
76
import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class UpperCAmelCase_ ( _A , unittest.TestCase ): '''simple docstring''' a__ = FunnelTokenizer a__ = FunnelTokenizerFast a__ = True a__ = True def _lowercase ( self : List[Any] ) -> str: """simple docstring""" super().setUp() __magic_name__ = [ """<unk>""", """<cls>""", """<sep>""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest""", ] __magic_name__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer: vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) ) def _lowercase ( self : Dict , **UpperCamelCase__ : Tuple ) -> Union[str, Any]: """simple docstring""" return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self : str , **UpperCamelCase__ : str ) -> List[str]: """simple docstring""" return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ ) def _lowercase ( self : List[str] , UpperCamelCase__ : str ) -> List[Any]: """simple docstring""" __magic_name__ = """UNwant\u00E9d,running""" __magic_name__ = """unwanted, running""" return input_text, output_text def _lowercase ( self : Union[str, Any] ) -> Tuple: """simple docstring""" __magic_name__ = self.tokenizer_class(self.vocab_file ) __magic_name__ = tokenizer.tokenize("""UNwant\u00E9d,running""" ) self.assertListEqual(UpperCamelCase__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] ) def _lowercase ( self : str ) -> List[Any]: """simple docstring""" 
__magic_name__ = self.get_tokenizers(do_lower_case=UpperCamelCase__ ) for tokenizer in tokenizers: __magic_name__ = tokenizer("""UNwant\u00E9d,running""" ) __magic_name__ = len(inputs["""input_ids"""] ) - 1 self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len ) __magic_name__ = tokenizer("""UNwant\u00E9d,running""" , """UNwant\u00E9d,running""" ) self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len + [1] * sentence_len )
76
1
from ...configuration_utils import PretrainedConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices __lowerCAmelCase : Tuple = logging.get_logger(__name__) __lowerCAmelCase : int = { 'shi-labs/dinat-mini-in1k-224': 'https://huggingface.co/shi-labs/dinat-mini-in1k-224/resolve/main/config.json', # See all Dinat models at https://huggingface.co/models?filter=dinat } class UpperCAmelCase_ ( _A , _A ): '''simple docstring''' a__ = """dinat""" a__ = { """num_attention_heads""": """num_heads""", """num_hidden_layers""": """num_layers""", } def __init__( self : str , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : Tuple=3 , UpperCamelCase__ : str=64 , UpperCamelCase__ : Dict=[3, 4, 6, 5] , UpperCamelCase__ : Tuple=[2, 4, 8, 16] , UpperCamelCase__ : str=7 , UpperCamelCase__ : Optional[int]=[[1, 8, 1], [1, 4, 1, 4], [1, 2, 1, 2, 1, 2], [1, 1, 1, 1, 1]] , UpperCamelCase__ : str=3.0 , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : List[Any]=0.0 , UpperCamelCase__ : Optional[Any]=0.0 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : str=0.02 , UpperCamelCase__ : Optional[int]=1E-5 , UpperCamelCase__ : str=0.0 , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : Union[str, Any] , ) -> Union[str, Any]: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = patch_size __magic_name__ = num_channels __magic_name__ = embed_dim __magic_name__ = depths __magic_name__ = len(UpperCamelCase__ ) __magic_name__ = num_heads __magic_name__ = kernel_size __magic_name__ = dilations __magic_name__ = mlp_ratio __magic_name__ = qkv_bias __magic_name__ = hidden_dropout_prob __magic_name__ = attention_probs_dropout_prob __magic_name__ = drop_path_rate __magic_name__ = hidden_act __magic_name__ = layer_norm_eps __magic_name__ = initializer_range # we set the hidden_size attribute in order to make Dinat work 
with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __magic_name__ = int(embed_dim * 2 ** (len(UpperCamelCase__ ) - 1) ) __magic_name__ = layer_scale_init_value __magic_name__ = ["""stem"""] + [F'''stage{idx}''' for idx in range(1 , len(UpperCamelCase__ ) + 1 )] __magic_name__ , __magic_name__ = get_aligned_output_features_output_indices( out_features=UpperCamelCase__ , out_indices=UpperCamelCase__ , stage_names=self.stage_names )
76
from collections import deque
from .hash_table import HashTable


class UpperCAmelCase_(_A):
    """Separate-chaining hash table: each occupied slot holds a deque.

    NOTE(review): the three methods below all share the name ``_lowercase``
    (an artifact of automatic renaming), so only the last definition is
    bound on the class; restore distinct names against the original source.
    """

    def __init__(self, *args, **kwargs):
        """Forward all arguments to the base hash table unchanged.

        Fix: the original declared ``*UpperCamelCase__`` and
        ``**UpperCamelCase__`` — a duplicate-argument SyntaxError.
        """
        super().__init__(*args, **kwargs)

    def _lowercase(self, key, value):
        """Prepend ``value`` to the chain stored in slot ``key``.

        Fixes: distinct parameter names (the mangled source declared two
        parameters with the same name), and the freshly created deque is
        stored back into the slot instead of a throwaway local, so
        ``appendleft`` no longer runs on ``None`` for an empty slot.
        """
        if self.values[key] is None:
            self.values[key] = deque()
        self.values[key].appendleft(value)

    def _lowercase(self):
        """Mean spare capacity per slot, scaled by the charge factor."""
        # Fix: the generator measured ``len`` of an undefined name; ``slot``
        # is the bound loop variable.
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _lowercase(self, key, data=None):
        """Resolve a collision: reuse ``key`` while its chain has room.

        Falls back to the base class resolution only when this slot's chain
        is full and no slot is empty.
        """
        # NOTE(review): assumes empty slots are represented by ``None``,
        # matching the check in the setter above — confirm against HashTable.
        if not (
            len(self.values[key]) == self.charge_factor
            and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
1
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


if TYPE_CHECKING:
    from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType


__lowerCAmelCase : Dict = logging.get_logger(__name__)

# Map of hosted DeBERTa-v2 checkpoints to their config files.
__lowerCAmelCase : List[Any] = {
    'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
    'microsoft/deberta-v2-xlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
    ),
    'microsoft/deberta-v2-xxlarge-mnli': (
        'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
    ),
}


class UpperCAmelCase_ ( _A ):
    '''Configuration for DeBERTa-v2 models.

    NOTE(review): this block appears machine-obfuscated — the ``__init__``
    body assigns to a single throwaway name and reads identifiers
    (``hidden_size``, ``pos_att_type``, ...) that the obfuscated signature
    does not bind. Compare with upstream ``DebertaV2Config``.
    '''

    a__ = """deberta-v2"""

    def __init__( self : Optional[Any] , UpperCamelCase__ : Union[str, Any]=12_8100 , UpperCamelCase__ : Tuple=1536 , UpperCamelCase__ : int=24 , UpperCamelCase__ : str=24 , UpperCamelCase__ : List[str]=6144 , UpperCamelCase__ : List[str]="gelu" , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Optional[Any]=0.1 , UpperCamelCase__ : Optional[Any]=512 , UpperCamelCase__ : Any=0 , UpperCamelCase__ : Optional[Any]=0.02 , UpperCamelCase__ : Any=1E-7 , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : List[str]=-1 , UpperCamelCase__ : Tuple=0 , UpperCamelCase__ : int=True , UpperCamelCase__ : str=None , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : List[Any]="gelu" , **UpperCamelCase__ : Dict , ) -> Union[str, Any]:
        """Store DeBERTa-v2 hyper-parameters; extra kwargs go to the base config."""
        super().__init__(**UpperCamelCase__ )

        __magic_name__ = hidden_size
        __magic_name__ = num_hidden_layers
        __magic_name__ = num_attention_heads
        __magic_name__ = intermediate_size
        __magic_name__ = hidden_act
        __magic_name__ = hidden_dropout_prob
        __magic_name__ = attention_probs_dropout_prob
        __magic_name__ = max_position_embeddings
        __magic_name__ = type_vocab_size
        __magic_name__ = initializer_range
        __magic_name__ = relative_attention
        __magic_name__ = max_relative_positions
        __magic_name__ = pad_token_id
        __magic_name__ = position_biased_input

        # Backwards compatibility
        if type(UpperCamelCase__ ) == str:
            __magic_name__ = [x.strip() for x in pos_att_type.lower().split("""|""" )]

        __magic_name__ = pos_att_type
        __magic_name__ = vocab_size
        __magic_name__ = layer_norm_eps

        __magic_name__ = kwargs.get("""pooler_hidden_size""" , UpperCamelCase__ )
        __magic_name__ = pooler_dropout
        __magic_name__ = pooler_hidden_act


class UpperCAmelCase_ ( _A ):
    '''ONNX export configuration for DeBERTa-v2.'''

    @property
    def _lowercase ( self : int ) -> Mapping[str, Mapping[int, str]]:
        """Dynamic-axis spec for exported inputs; token_type_ids only when the model uses them."""
        if self.task == "multiple-choice":
            __magic_name__ = {0: """batch""", 1: """choice""", 2: """sequence"""}
        else:
            __magic_name__ = {0: """batch""", 1: """sequence"""}
        if self._config.type_vocab_size > 0:
            return OrderedDict(
                [("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)]
            )
        else:
            return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )

    @property
    def _lowercase ( self : Tuple ) -> int:
        """Minimum ONNX opset version required for export."""
        return 12

    def _lowercase ( self : Tuple , UpperCamelCase__ : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : int = -1 , UpperCamelCase__ : bool = False , UpperCamelCase__ : Optional["TensorType"] = None , UpperCamelCase__ : int = 3 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : int = 40 , UpperCamelCase__ : "PreTrainedTokenizerBase" = None , ) -> Mapping[str, Any]:
        """Build dummy export inputs, dropping token_type_ids when the config disables them."""
        __magic_name__ = super().generate_dummy_inputs(preprocessor=UpperCamelCase__ , framework=UpperCamelCase__ )
        if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
            del dummy_inputs["token_type_ids"]
        return dummy_inputs
76
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interactive CLI for building a SageMaker launch configuration.
#
# NOTE(review): this block appears machine-obfuscated — function bodies read
# names (iam_client, role_name, use_dynamo, distributed_type, ...) that the
# obfuscated signatures/assignments do not bind, and three functions share
# the name ``a__``. Verify against the upstream accelerate sagemaker config
# module before relying on runtime behavior.
import json
import os

from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
    DYNAMO_BACKENDS,
    _ask_field,
    _ask_options,
    _convert_dynamo_backend,
    _convert_mixed_precision,
    _convert_sagemaker_distributed_mode,
    _convert_yes_no_to_bool,
)


if is_botoa_available():
    import botoa  # noqa: F401


def a__ ( A_ ):
    """Create a SageMaker execution IAM role with a permission policy (idempotent)."""
    __magic_name__ = botoa.client("""iam""" )

    # Trust policy: allow the SageMaker service to assume the role.
    __magic_name__ = {
        """Version""": """2012-10-17""",
        """Statement""": [
            {"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=A_, AssumeRolePolicyDocument=json.dumps(A_, indent=2 ) )
        # Permission policy: SageMaker plus the ECR / CloudWatch / Logs / S3
        # actions a training job needs.
        __magic_name__ = {
            """Version""": """2012-10-17""",
            """Statement""": [
                {
                    """Effect""": """Allow""",
                    """Action""": [
                        """sagemaker:*""",
                        """ecr:GetDownloadUrlForLayer""",
                        """ecr:BatchGetImage""",
                        """ecr:BatchCheckLayerAvailability""",
                        """ecr:GetAuthorizationToken""",
                        """cloudwatch:PutMetricData""",
                        """cloudwatch:GetMetricData""",
                        """cloudwatch:GetMetricStatistics""",
                        """cloudwatch:ListMetrics""",
                        """logs:CreateLogGroup""",
                        """logs:CreateLogStream""",
                        """logs:DescribeLogStreams""",
                        """logs:PutLogEvents""",
                        """logs:GetLogEvents""",
                        """s3:CreateBucket""",
                        """s3:ListBucket""",
                        """s3:GetBucketLocation""",
                        """s3:GetObject""",
                        """s3:PutObject""",
                    ],
                    """Resource""": """*""",
                }
            ],
        }
        # attach policy to role
        iam_client.put_role_policy(
            RoleName=A_, PolicyName=f'''{role_name}_policy_permission''', PolicyDocument=json.dumps(A_, indent=2 ), )
    except iam_client.exceptions.EntityAlreadyExistsException:
        print(f'''role {role_name} already exists. Using existing one''' )


def a__ ( A_ ):
    """Look up an existing IAM role and return its ARN."""
    __magic_name__ = botoa.client("""iam""" )
    return iam_client.get_role(RoleName=A_ )["Role"]["Arn"]


def a__ ( ):
    """Interactively collect all settings and return a SageMakerConfig."""
    # --- credentials ---
    __magic_name__ = _ask_options(
        """How do you want to authorize?""", ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """], A_, )
    __magic_name__ = None
    if credentials_configuration == 0:
        __magic_name__ = _ask_field("""Enter your AWS Profile name: [default] """, default="""default""" )
        __magic_name__ = aws_profile
    else:
        print(
            """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"""
            """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" )
        __magic_name__ = _ask_field("""AWS Access Key ID: """ )
        __magic_name__ = aws_access_key_id

        __magic_name__ = _ask_field("""AWS Secret Access Key: """ )
        __magic_name__ = aws_secret_access_key

    __magic_name__ = _ask_field("""Enter your AWS Region: [us-east-1]""", default="""us-east-1""" )
    __magic_name__ = aws_region

    # --- IAM role ---
    __magic_name__ = _ask_options(
        """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""", ["""Provide IAM Role name""", """Create new IAM role using credentials"""], A_, )
    if role_management == 0:
        __magic_name__ = _ask_field("""Enter your IAM role name: """ )
    else:
        __magic_name__ = """accelerate_sagemaker_execution_role"""
        print(f'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
        _create_iam_role_for_sagemaker(A_ )

    # --- optional custom Docker image ---
    __magic_name__ = _ask_field(
        """Do you want to use custom Docker image? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", )
    __magic_name__ = None
    if is_custom_docker_image:
        __magic_name__ = _ask_field("""Enter your Docker image: """, lambda A_ : str(A_ ).lower() )

    # --- optional input channels / metrics files ---
    __magic_name__ = _ask_field(
        """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", )
    __magic_name__ = None
    if is_sagemaker_inputs_enabled:
        __magic_name__ = _ask_field(
            """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """, lambda A_ : str(A_ ).lower(), )

    __magic_name__ = _ask_field(
        """Do you want to enable SageMaker metrics? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", )
    __magic_name__ = None
    if is_sagemaker_metrics_enabled:
        __magic_name__ = _ask_field(
            """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """, lambda A_ : str(A_ ).lower(), )

    # --- distributed mode / torch dynamo ---
    __magic_name__ = _ask_options(
        """What is the distributed mode?""", ["""No distributed training""", """Data parallelism"""], _convert_sagemaker_distributed_mode, )
    __magic_name__ = {}
    __magic_name__ = _ask_field(
        """Do you wish to optimize your script with torch dynamo?[yes/NO]:""", _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", )
    if use_dynamo:
        __magic_name__ = """dynamo_"""
        __magic_name__ = _ask_options(
            """Which dynamo backend would you like to use?""", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, )
        __magic_name__ = _ask_field(
            """Do you want to customize the defaults sent to torch.compile? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", )

        if use_custom_options:
            __magic_name__ = _ask_options(
                """Which mode do you want to use?""", A_, lambda A_ : TORCH_DYNAMO_MODES[int(A_ )], default="""default""", )
            __magic_name__ = _ask_field(
                """Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", )
            __magic_name__ = _ask_field(
                """Do you want to enable dynamic shape tracing? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", )

    # --- EC2 instance selection ---
    __magic_name__ = """Which EC2 instance type you want to use for your training?"""
    if distributed_type != SageMakerDistributedType.NO:
        __magic_name__ = _ask_options(
            A_, A_, lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] )
    else:
        eca_instance_query += "? [ml.p3.2xlarge]:"
        __magic_name__ = _ask_field(A_, lambda A_ : str(A_ ).lower(), default="""ml.p3.2xlarge""" )

    __magic_name__ = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        __magic_name__ = _ask_field(
            """How many machines do you want use? [1]: """, A_, default=1, )

    __magic_name__ = _ask_options(
        """Do you wish to use FP16 or BF16 (mixed precision)?""", ["""no""", """fp16""", """bf16""", """fp8"""], _convert_mixed_precision, )

    if use_dynamo and mixed_precision == "no":
        print(
            """Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" )

    return SageMakerConfig(
        image_uri=A_, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=A_, use_cpu=A_, dynamo_config=A_, eca_instance_type=A_, profile=A_, region=A_, iam_role_name=A_, mixed_precision=A_, num_machines=A_, sagemaker_inputs_file=A_, sagemaker_metrics_file=A_, )
76
1
def a__ ( A_ ):
    """Return True if every character of the string ``A_`` is unique.

    Uses an arbitrary-precision integer as a bitmap over Unicode code
    points: bit ``ord(ch)`` is set the first time a character is seen, and
    seeing an already-set bit means a repeat, so we return False early.
    The empty string is trivially unique.

    Fix: the original body referenced names never bound in this scope
    (``input_str``, ``bitmap``, ``ch_unicode``, ``ch_bit_index_on``) and
    raised NameError on every call; the locals are now derived from the
    ``A_`` parameter. Interface is unchanged.
    """
    bitmap = 0
    for ch in A_:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on bit for current character's unicode
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True


if __name__ == "__main__":
    import doctest

    doctest.testmod()
76
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


__lowerCAmelCase : Dict = logging.get_logger(__name__)


if is_vision_available():
    import PIL


class UpperCAmelCase_ ( _A ):
    '''CLIP-style image processor: resize -> center-crop -> rescale -> normalize.

    NOTE(review): this block appears machine-obfuscated — method bodies read
    names (``size``, ``crop_size``, ``images``, ...) that the obfuscated
    signatures/assignments do not bind. Compare with the upstream
    ``CLIPImageProcessor`` before trusting the attribute wiring.
    '''

    a__ = ["""pixel_values"""]

    def __init__( self : Optional[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = True , **UpperCamelCase__ : int , ) -> None:
        """Record defaults (224px shortest edge, 224x224 crop, CLIP mean/std)."""
        super().__init__(**UpperCamelCase__ )
        __magic_name__ = size if size is not None else {"""shortest_edge""": 224}
        __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
        __magic_name__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
        __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ , param_name="""crop_size""" )

        __magic_name__ = do_resize
        __magic_name__ = size
        __magic_name__ = resample
        __magic_name__ = do_center_crop
        __magic_name__ = crop_size
        __magic_name__ = do_rescale
        __magic_name__ = rescale_factor
        __magic_name__ = do_normalize
        __magic_name__ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        __magic_name__ = image_std if image_std is not None else OPENAI_CLIP_STD
        __magic_name__ = do_convert_rgb

    def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray:
        """Resize so the shortest edge matches ``size["shortest_edge"]`` (aspect kept)."""
        __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ )
        if "shortest_edge" not in size:
            raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
        __magic_name__ = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__ )
        return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )

    def _lowercase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray:
        """Center-crop to ``size["height"]`` x ``size["width"]``."""
        __magic_name__ = get_size_dict(UpperCamelCase__ )
        if "height" not in size or "width" not in size:
            raise ValueError(F'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
        return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ )

    def _lowercase ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] , ) -> Optional[int]:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )

    def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict , ) -> np.ndarray:
        """Normalize with the given per-channel mean and std."""
        return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )

    def _lowercase ( self : List[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ) -> PIL.Image.Image:
        """Run the full pipeline over a batch of images and return a BatchFeature."""
        # Per-call overrides fall back to the instance defaults.
        __magic_name__ = do_resize if do_resize is not None else self.do_resize
        __magic_name__ = size if size is not None else self.size
        __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""size""" , default_to_square=UpperCamelCase__ )
        __magic_name__ = resample if resample is not None else self.resample
        __magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop
        __magic_name__ = crop_size if crop_size is not None else self.crop_size
        __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" , default_to_square=UpperCamelCase__ )
        __magic_name__ = do_rescale if do_rescale is not None else self.do_rescale
        __magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor
        __magic_name__ = do_normalize if do_normalize is not None else self.do_normalize
        __magic_name__ = image_mean if image_mean is not None else self.image_mean
        __magic_name__ = image_std if image_std is not None else self.image_std
        __magic_name__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        __magic_name__ = make_list_of_images(UpperCamelCase__ )

        if not valid_images(UpperCamelCase__ ):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray.""" )

        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""" )

        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""" )

        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            __magic_name__ = [convert_to_rgb(UpperCamelCase__ ) for image in images]

        # All transformations expect numpy arrays.
        __magic_name__ = [to_numpy_array(UpperCamelCase__ ) for image in images]

        if do_resize:
            __magic_name__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]

        if do_center_crop:
            __magic_name__ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images]

        if do_rescale:
            __magic_name__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]

        if do_normalize:
            __magic_name__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]

        __magic_name__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]

        __magic_name__ = {"""pixel_values""": images}
        return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
76
1
# Burrows-Wheeler transform (BWT) and its inverse.
#
# NOTE(review): this block appears machine-mangled beyond runnability:
# the third function declares a duplicate parameter (``A_, A_`` — a
# SyntaxError), three functions share the name ``a__`` while the
# ``__main__`` script calls ``bwt_transform`` / ``reverse_bwt`` which are
# never defined, and the bodies read unbound names (``s``, ``rotations``,
# ``bwt_string``, ``ordered_rotations``). Restore from the upstream
# Burrows-Wheeler implementation before use.
from __future__ import annotations

from typing import TypedDict


class UpperCAmelCase_ ( _A ):
    '''TypedDict-style result record: the BWT string plus the original rotation index.'''

    # BWT output string (presumably) — TODO confirm against upstream field names.
    a__ = 42
    # Index of the original string among the sorted rotations (presumably).
    a__ = 42


def a__ ( A_ ):
    """Return all cyclic rotations of the input string."""
    if not isinstance(A_, A_ ):
        raise TypeError("""The parameter s type must be str.""" )

    return [s[i:] + s[:i] for i in range(len(A_ ) )]


def a__ ( A_ ):
    """Compute the Burrows-Wheeler transform of a non-empty string."""
    if not isinstance(A_, A_ ):
        raise TypeError("""The parameter s type must be str.""" )
    if not s:
        raise ValueError("""The parameter s must not be empty.""" )

    __magic_name__ = all_rotations(A_ )
    rotations.sort()  # sort the list of rotations in alphabetically order
    # make a string composed of the last char of each rotation
    __magic_name__ = {
        "bwt_string": "".join([word[-1] for word in rotations] ),
        "idx_original_string": rotations.index(A_ ),
    }
    return response


def a__ ( A_, A_ ):
    """Invert a Burrows-Wheeler transform given the original-string index."""
    if not isinstance(A_, A_ ):
        raise TypeError("""The parameter bwt_string type must be str.""" )
    if not bwt_string:
        raise ValueError("""The parameter bwt_string must not be empty.""" )
    try:
        __magic_name__ = int(A_ )
    except ValueError:
        raise TypeError(
            """The parameter idx_original_string type must be int or passive"""
            """ of cast to int.""" )
    if idx_original_string < 0:
        raise ValueError("""The parameter idx_original_string must not be lower than 0.""" )
    if idx_original_string >= len(A_ ):
        raise ValueError(
            """The parameter idx_original_string must be lower than""" """ len(bwt_string).""" )

    # Iteratively rebuild the sorted rotation table one column at a time.
    __magic_name__ = [""""""] * len(A_ )
    for _ in range(len(A_ ) ):
        for i in range(len(A_ ) ):
            __magic_name__ = bwt_string[i] + ordered_rotations[i]
        ordered_rotations.sort()
    return ordered_rotations[idx_original_string]


if __name__ == "__main__":
    __lowerCAmelCase : Tuple = 'Provide a string that I will generate its BWT transform: '
    __lowerCAmelCase : List[Any] = input(entry_msg).strip()
    __lowerCAmelCase : List[Any] = bwt_transform(s)
    print(
        F'''Burrows Wheeler transform for string \'{s}\' results '''
        F'''in \'{result["bwt_string"]}\''''
    )
    __lowerCAmelCase : Optional[Any] = reverse_bwt(result['bwt_string'], result['idx_original_string'])
    print(
        F'''Reversing Burrows Wheeler transform for entry \'{result["bwt_string"]}\' '''
        F'''we get original string \'{original_string}\''''
    )
76
# Test suite for Nystromformer models (tester harness, common model tests,
# and slow integration tests against the hosted checkpoint).
#
# NOTE(review): this block appears machine-obfuscated — three classes share
# the name ``UpperCAmelCase_``, ``setUp`` references the undefined
# ``NystromformerModelTester``, and method bodies assign everything to the
# same throwaway name while reading unbound identifiers. Compare with the
# upstream test_modeling_nystromformer module.
import unittest

from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        NystromformerForMaskedLM,
        NystromformerForMultipleChoice,
        NystromformerForQuestionAnswering,
        NystromformerForSequenceClassification,
        NystromformerForTokenClassification,
        NystromformerModel,
    )
    from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


class UpperCAmelCase_ :
    '''Builds tiny configs/inputs and runs per-head checks for Nystromformer.'''

    def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Dict=7 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[int]=99 , UpperCamelCase__ : List[Any]=32 , UpperCamelCase__ : Any=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : str=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Dict=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : List[Any]=None , ) -> Union[str, Any]:
        """Record the tiny-model hyper-parameters used by every check below."""
        __magic_name__ = parent
        __magic_name__ = batch_size
        __magic_name__ = seq_length
        __magic_name__ = is_training
        __magic_name__ = use_input_mask
        __magic_name__ = use_token_type_ids
        __magic_name__ = use_labels
        __magic_name__ = vocab_size
        __magic_name__ = hidden_size
        __magic_name__ = num_hidden_layers
        __magic_name__ = num_attention_heads
        __magic_name__ = intermediate_size
        __magic_name__ = hidden_act
        __magic_name__ = hidden_dropout_prob
        __magic_name__ = attention_probs_dropout_prob
        __magic_name__ = max_position_embeddings
        __magic_name__ = type_vocab_size
        __magic_name__ = type_sequence_label_size
        __magic_name__ = initializer_range
        __magic_name__ = num_labels
        __magic_name__ = num_choices
        __magic_name__ = scope

    def _lowercase ( self : Any ) -> Any:
        """Create a config plus random ids/masks/labels for one forward pass."""
        __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        __magic_name__ = None
        if self.use_input_mask:
            __magic_name__ = random_attention_mask([self.batch_size, self.seq_length] )

        __magic_name__ = None
        if self.use_token_type_ids:
            __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        __magic_name__ = None
        __magic_name__ = None
        __magic_name__ = None
        if self.use_labels:
            __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __magic_name__ = ids_tensor([self.batch_size] , self.num_choices )

        __magic_name__ = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _lowercase ( self : Tuple ) -> Any:
        """Build a NystromformerConfig from the stored hyper-parameters."""
        return NystromformerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )

    def _lowercase ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : str ) -> Tuple:
        """Check the base model's last_hidden_state shape under several input combos."""
        __magic_name__ = NystromformerModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
        __magic_name__ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
        __magic_name__ = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _lowercase ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ) -> str:
        """Check the masked-LM head's logits shape."""
        __magic_name__ = NystromformerForMaskedLM(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _lowercase ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Any ) -> Optional[Any]:
        """Check the QA head's start/end logits shapes."""
        __magic_name__ = NystromformerForQuestionAnswering(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Any ) -> Optional[int]:
        """Check the sequence-classification head's logits shape."""
        __magic_name__ = self.num_labels
        __magic_name__ = NystromformerForSequenceClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _lowercase ( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Any ) -> Dict:
        """Check the token-classification head's logits shape."""
        __magic_name__ = self.num_labels
        __magic_name__ = NystromformerForTokenClassification(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> Optional[Any]:
        """Check the multiple-choice head's logits shape (inputs tiled per choice)."""
        __magic_name__ = self.num_choices
        __magic_name__ = NystromformerForMultipleChoice(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __magic_name__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __magic_name__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __magic_name__ = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def _lowercase ( self : int ) -> List[Any]:
        """Repackage prepare_config_and_inputs() into the common-test inputs dict."""
        __magic_name__ = self.prepare_config_and_inputs()
        (
            (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) , (
                __magic_name__
            ) ,
        ) = config_and_inputs
        __magic_name__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict


@require_torch
class UpperCAmelCase_ ( _A , _A , unittest.TestCase ):
    '''Common model/pipeline tests specialized for Nystromformer.'''

    a__ = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    a__ = (
        {
            """feature-extraction""": NystromformerModel,
            """fill-mask""": NystromformerForMaskedLM,
            """question-answering""": NystromformerForQuestionAnswering,
            """text-classification""": NystromformerForSequenceClassification,
            """token-classification""": NystromformerForTokenClassification,
            """zero-shot""": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )

    a__ = False
    a__ = False

    def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
        """Build the model tester and a ConfigTester for the common suites."""
        __magic_name__ = NystromformerModelTester(self )
        __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )

    def _lowercase ( self : Tuple ) -> Any:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def _lowercase ( self : Optional[Any] ) -> Any:
        """Base-model forward-shape check."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def _lowercase ( self : Optional[Any] ) -> int:
        """Base-model check across all position-embedding types."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __magic_name__ = type
            self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def _lowercase ( self : List[Any] ) -> List[Any]:
        """Masked-LM head check."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )

    def _lowercase ( self : Union[str, Any] ) -> str:
        """Multiple-choice head check."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )

    def _lowercase ( self : Dict ) -> List[Any]:
        """Question-answering head check."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )

    def _lowercase ( self : str ) -> int:
        """Sequence-classification head check."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )

    def _lowercase ( self : List[Any] ) -> List[str]:
        """Token-classification head check."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )

    @slow
    def _lowercase ( self : str ) -> Tuple:
        """Smoke-test loading the first hosted checkpoint."""
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __magic_name__ = NystromformerModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )


@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
    '''Slow integration tests against the uw-madison/nystromformer-512 checkpoint.'''

    @slow
    def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
        """Headless forward: check output shape and a pinned 3x3 logits slice."""
        __magic_name__ = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" )
        __magic_name__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )

        with torch.no_grad():
            __magic_name__ = model(UpperCamelCase__ )[0]

        __magic_name__ = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , UpperCamelCase__ )

        __magic_name__ = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )

        self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )

    @slow
    def _lowercase ( self : int ) -> str:
        """Masked-LM fill: '[MASK] of Belgium' should decode to 'capital'."""
        __magic_name__ = """the [MASK] of Belgium is Brussels"""

        __magic_name__ = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" )
        __magic_name__ = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" )

        __magic_name__ = tokenizer(UpperCamelCase__ , return_tensors="""pt""" )

        with torch.no_grad():
            __magic_name__ = model(encoding.input_ids ).logits

        __magic_name__ = token_logits[:, 2, :].argmax(-1 )[0]

        self.assertEqual(tokenizer.decode(UpperCamelCase__ ) , """capital""" )
76
1
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Tuple = logging.get_logger(__name__) __lowerCAmelCase : Union[str, Any] = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """cvt""" def __init__( self : Dict , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : List[Any]=[7, 3, 3] , UpperCamelCase__ : Any=[4, 2, 2] , UpperCamelCase__ : Optional[Any]=[2, 1, 1] , UpperCamelCase__ : Union[str, Any]=[64, 192, 384] , UpperCamelCase__ : Dict=[1, 3, 6] , UpperCamelCase__ : Any=[1, 2, 10] , UpperCamelCase__ : List[str]=[4.0, 4.0, 4.0] , UpperCamelCase__ : Dict=[0.0, 0.0, 0.0] , UpperCamelCase__ : Tuple=[0.0, 0.0, 0.0] , UpperCamelCase__ : Optional[Any]=[0.0, 0.0, 0.1] , UpperCamelCase__ : str=[True, True, True] , UpperCamelCase__ : Optional[Any]=[False, False, True] , UpperCamelCase__ : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase__ : List[Any]=[3, 3, 3] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : Optional[int]=[2, 2, 2] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : List[str]=[1, 1, 1] , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : int=1E-12 , **UpperCamelCase__ : int , ) -> Dict: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = num_channels __magic_name__ = patch_sizes __magic_name__ = patch_stride __magic_name__ = patch_padding __magic_name__ = embed_dim __magic_name__ = num_heads __magic_name__ = depth __magic_name__ = mlp_ratio __magic_name__ = attention_drop_rate __magic_name__ = drop_rate __magic_name__ = drop_path_rate __magic_name__ = qkv_bias __magic_name__ = cls_token __magic_name__ = qkv_projection_method __magic_name__ = kernel_qkv __magic_name__ = padding_kv __magic_name__ = stride_kv __magic_name__ = padding_q __magic_name__ = stride_q __magic_name__ = initializer_range 
__magic_name__ = layer_norm_eps
76
from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Tuple = logging.get_logger(__name__) __lowerCAmelCase : Union[str, Any] = { 'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json', # See all Cvt models at https://huggingface.co/models?filter=cvt } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """cvt""" def __init__( self : Dict , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : List[Any]=[7, 3, 3] , UpperCamelCase__ : Any=[4, 2, 2] , UpperCamelCase__ : Optional[Any]=[2, 1, 1] , UpperCamelCase__ : Union[str, Any]=[64, 192, 384] , UpperCamelCase__ : Dict=[1, 3, 6] , UpperCamelCase__ : Any=[1, 2, 10] , UpperCamelCase__ : List[str]=[4.0, 4.0, 4.0] , UpperCamelCase__ : Dict=[0.0, 0.0, 0.0] , UpperCamelCase__ : Tuple=[0.0, 0.0, 0.0] , UpperCamelCase__ : Optional[Any]=[0.0, 0.0, 0.1] , UpperCamelCase__ : str=[True, True, True] , UpperCamelCase__ : Optional[Any]=[False, False, True] , UpperCamelCase__ : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase__ : List[Any]=[3, 3, 3] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : Optional[int]=[2, 2, 2] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : List[str]=[1, 1, 1] , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : int=1E-12 , **UpperCamelCase__ : int , ) -> Dict: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = num_channels __magic_name__ = patch_sizes __magic_name__ = patch_stride __magic_name__ = patch_padding __magic_name__ = embed_dim __magic_name__ = num_heads __magic_name__ = depth __magic_name__ = mlp_ratio __magic_name__ = attention_drop_rate __magic_name__ = drop_rate __magic_name__ = drop_path_rate __magic_name__ = qkv_bias __magic_name__ = cls_token __magic_name__ = qkv_projection_method __magic_name__ = kernel_qkv __magic_name__ = padding_kv __magic_name__ = stride_kv __magic_name__ = padding_q __magic_name__ = stride_q __magic_name__ = initializer_range 
__magic_name__ = layer_norm_eps
76
1
from typing import List, Optional, Union import torch from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) __lowerCAmelCase : int = logging.get_logger(__name__) # pylint: disable=invalid-name __lowerCAmelCase : Union[str, Any] = '\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-2-prior")\n >>> pipe_prior.to("cuda")\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> zero_image_emb = out.negative_image_embeds\n >>> pipe = KandinskyV22Pipeline.from_pretrained("kandinsky-community/kandinsky-2-2-decoder")\n >>> pipe.to("cuda")\n >>> image = pipe(\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=50,\n ... 
).images\n >>> image[0].save("cat.png")\n ```\n' def a__ ( A_, A_, A_=8 ): '''simple docstring''' __magic_name__ = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 __magic_name__ = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor class UpperCAmelCase_ ( _A ): '''simple docstring''' def __init__( self : Tuple , UpperCamelCase__ : UNetaDConditionModel , UpperCamelCase__ : DDPMScheduler , UpperCamelCase__ : VQModel , ) -> Union[str, Any]: """simple docstring""" super().__init__() self.register_modules( unet=UpperCamelCase__ , scheduler=UpperCamelCase__ , movq=UpperCamelCase__ , ) __magic_name__ = 2 ** (len(self.movq.config.block_out_channels ) - 1) def _lowercase ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Optional[Any] ) -> Tuple: """simple docstring""" if latents is None: __magic_name__ = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__ , device=UpperCamelCase__ , dtype=UpperCamelCase__ ) else: if latents.shape != shape: raise ValueError(F'''Unexpected latents shape, got {latents.shape}, expected {shape}''' ) __magic_name__ = latents.to(UpperCamelCase__ ) __magic_name__ = latents * scheduler.init_noise_sigma return latents def _lowercase ( self : int , UpperCamelCase__ : List[Any]=0 ) -> Union[str, Any]: """simple docstring""" if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("""Please install accelerate via `pip install accelerate`""" ) __magic_name__ = torch.device(F'''cuda:{gpu_id}''' ) __magic_name__ = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(UpperCamelCase__ , UpperCamelCase__ ) def _lowercase ( self : List[Any] , UpperCamelCase__ : Dict=0 ) -> int: """simple docstring""" if 
is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ): from accelerate import cpu_offload_with_hook else: raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" ) __magic_name__ = torch.device(F'''cuda:{gpu_id}''' ) if self.device.type != "cpu": self.to("""cpu""" , silence_dtype_warnings=UpperCamelCase__ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) __magic_name__ = None for cpu_offloaded_model in [self.unet, self.movq]: __magic_name__ , __magic_name__ = cpu_offload_with_hook(UpperCamelCase__ , UpperCamelCase__ , prev_module_hook=UpperCamelCase__ ) # We'll offload the last model manually. __magic_name__ = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def _lowercase ( self : int ) -> Tuple: """simple docstring""" if not hasattr(self.unet , """_hf_hook""" ): return self.device for module in self.unet.modules(): if ( hasattr(UpperCamelCase__ , """_hf_hook""" ) and hasattr(module._hf_hook , """execution_device""" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(UpperCamelCase__ ) def __call__( self : Dict , UpperCamelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase__ : Union[torch.FloatTensor, List[torch.FloatTensor]] , UpperCamelCase__ : int = 512 , UpperCamelCase__ : int = 512 , UpperCamelCase__ : int = 100 , UpperCamelCase__ : float = 4.0 , UpperCamelCase__ : int = 1 , UpperCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase__ : Optional[torch.FloatTensor] = None , UpperCamelCase__ : Optional[str] = "pil" , UpperCamelCase__ : bool = True , ) -> int: """simple docstring""" __magic_name__ = self._execution_device __magic_name__ = guidance_scale > 1.0 if 
isinstance(UpperCamelCase__ , UpperCamelCase__ ): __magic_name__ = torch.cat(UpperCamelCase__ , dim=0 ) __magic_name__ = image_embeds.shape[0] * num_images_per_prompt if isinstance(UpperCamelCase__ , UpperCamelCase__ ): __magic_name__ = torch.cat(UpperCamelCase__ , dim=0 ) if do_classifier_free_guidance: __magic_name__ = image_embeds.repeat_interleave(UpperCamelCase__ , dim=0 ) __magic_name__ = negative_image_embeds.repeat_interleave(UpperCamelCase__ , dim=0 ) __magic_name__ = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=UpperCamelCase__ ) self.scheduler.set_timesteps(UpperCamelCase__ , device=UpperCamelCase__ ) __magic_name__ = self.scheduler.timesteps __magic_name__ = self.unet.config.in_channels __magic_name__ , __magic_name__ = downscale_height_and_width(UpperCamelCase__ , UpperCamelCase__ , self.movq_scale_factor ) # create initial latent __magic_name__ = self.prepare_latents( (batch_size, num_channels_latents, height, width) , image_embeds.dtype , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.scheduler , ) for i, t in enumerate(self.progress_bar(UpperCamelCase__ ) ): # expand the latents if we are doing classifier free guidance __magic_name__ = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents __magic_name__ = {"""image_embeds""": image_embeds} __magic_name__ = self.unet( sample=UpperCamelCase__ , timestep=UpperCamelCase__ , encoder_hidden_states=UpperCamelCase__ , added_cond_kwargs=UpperCamelCase__ , return_dict=UpperCamelCase__ , )[0] if do_classifier_free_guidance: __magic_name__ , __magic_name__ = noise_pred.split(latents.shape[1] , dim=1 ) __magic_name__ , __magic_name__ = noise_pred.chunk(2 ) __magic_name__ , __magic_name__ = variance_pred.chunk(2 ) __magic_name__ = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) __magic_name__ = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , """variance_type""" ) 
and self.scheduler.config.variance_type in ["learned", "learned_range"] ): __magic_name__ , __magic_name__ = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 __magic_name__ = self.scheduler.step( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ , )[0] # post-processing __magic_name__ = self.movq.decode(UpperCamelCase__ , force_not_quantize=UpperCamelCase__ )["""sample"""] if output_type not in ["pt", "np", "pil"]: raise ValueError(F'''Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}''' ) if output_type in ["np", "pil"]: __magic_name__ = image * 0.5 + 0.5 __magic_name__ = image.clamp(0 , 1 ) __magic_name__ = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": __magic_name__ = self.numpy_to_pil(UpperCamelCase__ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase__ )
76
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowerCAmelCase : List[str] = { 'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'], 'tokenization_canine': ['CanineTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[Any] = [ 'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST', 'CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineLayer', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys __lowerCAmelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
1
from __future__ import annotations def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = list(range(len(A_ ) ) ) __magic_name__ = [v / w for v, w in zip(A_, A_ )] index.sort(key=lambda A_ : ratio[i], reverse=A_ ) __magic_name__ = 0 __magic_name__ = [0] * len(A_ ) for i in index: if weight[i] <= capacity: __magic_name__ = 1 max_value += value[i] capacity -= weight[i] else: __magic_name__ = capacity / weight[i] max_value += value[i] * capacity / weight[i] break return max_value, fractions if __name__ == "__main__": import doctest doctest.testmod()
76
import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() __lowerCAmelCase : str = logging.get_logger(__name__) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForSequenceClassification.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""projector.weight"""] __magic_name__ = downstream_dict["""projector.bias"""] __magic_name__ = downstream_dict["""model.post_net.linear.weight"""] __magic_name__ = downstream_dict["""model.post_net.linear.bias"""] return model def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForAudioFrameClassification.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""model.linear.weight"""] __magic_name__ = downstream_dict["""model.linear.bias"""] return model def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForXVector.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""connector.weight"""] __magic_name__ = downstream_dict["""connector.bias"""] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): __magic_name__ = downstream_dict[ f'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] __magic_name__ = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""] __magic_name__ = downstream_dict["""objective.W"""] return model @torch.no_grad() def a__ ( A_, A_, A_, A_ ): '''simple docstring''' __magic_name__ = torch.load(A_, map_location="""cpu""" ) 
__magic_name__ = checkpoint["""Downstream"""] __magic_name__ = WavaVecaConfig.from_pretrained(A_ ) __magic_name__ = WavaVecaFeatureExtractor.from_pretrained( A_, return_attention_mask=A_, do_normalize=A_ ) __magic_name__ = hf_config.architectures[0] if arch.endswith("""ForSequenceClassification""" ): __magic_name__ = convert_classification(A_, A_, A_ ) elif arch.endswith("""ForAudioFrameClassification""" ): __magic_name__ = convert_diarization(A_, A_, A_ ) elif arch.endswith("""ForXVector""" ): __magic_name__ = convert_xvector(A_, A_, A_ ) else: raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' ) if hf_config.use_weighted_layer_sum: __magic_name__ = checkpoint["""Featurizer"""]["""weights"""] hf_feature_extractor.save_pretrained(A_ ) hf_model.save_pretrained(A_ ) if __name__ == "__main__": __lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.' ) parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') __lowerCAmelCase : str = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
76
1
import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets __lowerCAmelCase : Optional[int] = datasets.logging.get_logger(__name__) __lowerCAmelCase : Tuple = '\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n' __lowerCAmelCase : List[str] = '\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project\'s README at https://github.com/google-research/bleurt#readme for more information.\n' __lowerCAmelCase : Optional[Any] = '\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. 
Will default to BLEURT-tiny if None.\n\nReturns:\n \'scores\': List of scores.\nExamples:\n\n >>> predictions = ["hello there", "general kenobi"]\n >>> references = ["hello there", "general kenobi"]\n >>> bleurt = datasets.load_metric("bleurt")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results["scores"]])\n [1.03, 1.04]\n' __lowerCAmelCase : Any = { 'bleurt-tiny-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip', 'bleurt-tiny-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip', 'bleurt-base-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip', 'bleurt-base-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip', 'bleurt-large-128': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip', 'bleurt-large-512': 'https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip', 'BLEURT-20-D3': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip', 'BLEURT-20-D6': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip', 'BLEURT-20-D12': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip', 'BLEURT-20': 'https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip', } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class UpperCAmelCase_ ( datasets.Metric ): '''simple docstring''' def _lowercase ( self : Optional[int] ) -> Dict: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence""" ), """references""": datasets.Value("""string""" , id="""sequence""" ), } ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , 
) def _lowercase ( self : List[str] , UpperCamelCase__ : Optional[Any] ) -> int: """simple docstring""" if self.config_name == "default": logger.warning( """Using default BLEURT-Base checkpoint for sequence maximum length 128. """ """You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" ) __magic_name__ = """bleurt-base-128""" if self.config_name.lower() in CHECKPOINT_URLS: __magic_name__ = self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: __magic_name__ = self.config_name.upper() else: raise KeyError( F'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' ) # download the model checkpoint specified by self.config_name and set up the scorer __magic_name__ = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] ) __magic_name__ = score.BleurtScorer(os.path.join(UpperCamelCase__ , UpperCamelCase__ ) ) def _lowercase ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] ) -> List[Any]: """simple docstring""" __magic_name__ = self.scorer.score(references=UpperCamelCase__ , candidates=UpperCamelCase__ ) return {"scores": scores}
76
import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def a__ ( A_, A_ ): '''simple docstring''' assert isinstance(A_, A_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""", [False, True] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ = TextDatasetReader(A_, cache_dir=A_, keep_in_memory=A_ ).read() _check_text_dataset(A_, A_ ) @pytest.mark.parametrize( """features""", [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ], ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = features.copy() if features else default_expected_features __magic_name__ = ( Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ = TextDatasetReader(A_, features=A_, cache_dir=A_ ).read() _check_text_dataset(A_, A_ ) @pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_, split=A_ ).read() _check_text_dataset(A_, A_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""", [str, list] ) def a__ ( A_, A_, A_ ): '''simple docstring''' if issubclass(A_, A_ ): 
__magic_name__ = text_path elif issubclass(A_, A_ ): __magic_name__ = [text_path] __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_ ).read() _check_text_dataset(A_, A_ ) def a__ ( A_, A_, A_=("train",) ): '''simple docstring''' assert isinstance(A_, A_ ) for split in splits: __magic_name__ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""", [False, True] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ = TextDatasetReader({"""train""": text_path}, cache_dir=A_, keep_in_memory=A_ ).read() _check_text_datasetdict(A_, A_ ) @pytest.mark.parametrize( """features""", [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ], ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" __magic_name__ = {"""text""": """string"""} __magic_name__ = features.copy() if features else default_expected_features __magic_name__ = ( Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ = TextDatasetReader({"""train""": text_path}, features=A_, cache_dir=A_ ).read() _check_text_datasetdict(A_, A_ ) @pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] ) def a__ ( A_, A_, A_ ): '''simple docstring''' if split: __magic_name__ = {split: text_path} else: __magic_name__ = """train""" __magic_name__ = {"""train""": 
text_path, """test""": text_path} __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_ ).read() _check_text_datasetdict(A_, A_, splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
76
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) __lowerCAmelCase : Dict = { 'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig'] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : List[Any] = ['ConvNextFeatureExtractor'] __lowerCAmelCase : str = ['ConvNextImageProcessor'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[Any] = [ 'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST', 'ConvNextForImageClassification', 'ConvNextModel', 'ConvNextPreTrainedModel', 'ConvNextBackbone', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : List[str] = [ 'TFConvNextForImageClassification', 'TFConvNextModel', 'TFConvNextPreTrainedModel', ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys 
__lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure)
76
"""Image processor module (shortest-edge resize, center crop, rescale, normalize).

Fixes: the class inherited from an undefined name `_A` (restored to
`BaseImageProcessor`, imported above); every signature repeated the same
parameter name `UpperCamelCase__`, which is a SyntaxError in Python; and all
five methods were named `_lowercase`, so each definition overwrote the previous
one. Parameter and method names are restored to the conventional ones.
"""
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    IMAGENET_STANDARD_MEAN,
    IMAGENET_STANDARD_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class UpperCAmelCase_(BaseImageProcessor):
    """Preprocess images: resize shortest edge to 256, center-crop to 224x224,
    rescale by 1/255 and normalize with ImageNet standard mean/std (defaults)."""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        # Default: resize shortest edge to 256 (non-square), then crop to 224x224.
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size)
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize so the shortest edge equals ``size["shortest_edge"]``, keeping aspect ratio."""
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''')
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        # `resize` here is the module-level transform imported above, not this method.
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Center-crop to ``(size["height"], size["width"])``."""
        size = get_size_dict(size)
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Multiply pixel values by ``scale`` (e.g. 1/255)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """Normalize per channel: ``(image - mean) / std``."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        """Apply the configured transforms to one or more images and return a BatchFeature.

        Per-call arguments override the values stored on the processor.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
                """torch.Tensor, tf.Tensor or jax.ndarray."""
            )

        if do_resize and size is None:
            raise ValueError("""Size must be specified if do_resize is True.""")
        if do_center_crop and crop_size is None:
            raise ValueError("""Crop size must be specified if do_center_crop is True.""")
        if do_rescale and rescale_factor is None:
            raise ValueError("""Rescale factor must be specified if do_rescale is True.""")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("""Image mean and std must be specified if do_normalize is True.""")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"""pixel_values""": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
76
1
"""Lazy-import init for the CANINE model.

Fix: the import-structure dict was bound to the obfuscated `__lowerCAmelCase`
while `_LazyModule(...)` referenced the undefined `_import_structure`
(NameError at import time); the proxy module was also assigned to a throwaway
name instead of `sys.modules[__name__]`. Canonical pattern restored.
"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available

# Maps submodule name -> public objects it exposes; consumed by _LazyModule below.
_import_structure = {
    "configuration_canine": ["CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP", "CanineConfig"],
    "tokenization_canine": ["CanineTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_canine"] = [
        "CANINE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CanineForMultipleChoice",
        "CanineForQuestionAnswering",
        "CanineForSequenceClassification",
        "CanineForTokenClassification",
        "CanineLayer",
        "CanineModel",
        "CaninePreTrainedModel",
        "load_tf_weights_in_canine",
    ]

if TYPE_CHECKING:
    from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
    from .tokenization_canine import CanineTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_canine import (
            CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
            CanineForMultipleChoice,
            CanineForQuestionAnswering,
            CanineForSequenceClassification,
            CanineForTokenClassification,
            CanineLayer,
            CanineModel,
            CaninePreTrainedModel,
            load_tf_weights_in_canine,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
76
"""Introsort: quicksort with median-of-3 pivots, falling back to heapsort when
recursion gets too deep and to insertion sort on small slices.

Fix: every function had been renamed to `a__` (each def shadowing the last)
while the bodies still called `insertion_sort`, `heapify`, `heap_sort`,
`median_of_3`, `partition` and `intro_sort` — a NameError on first call — and
signatures repeated the parameter name `A_` (a SyntaxError). The names used at
the call sites are restored.
"""
import math


def insertion_sort(array, start=0, end=0):
    """Sort ``array[start:end]`` in place with insertion sort and return the array.

    ``end`` defaults to ``len(array)`` when left at 0.
    """
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        # Shift larger elements one slot right until the insertion point is found.
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array, index, heap_size):  # Max Heap
    """Sift down: restore the max-heap property for the subtree rooted at ``index``."""
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array):
    """Sort ``array`` in place with heapsort and return it."""
    n = len(array)

    # Build the max heap bottom-up.
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    # Repeatedly move the max to the end and re-heapify the prefix.
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)

    return array


def median_of_3(array, first_index, middle_index, last_index):
    """Return the median of the three indexed values (robust pivot selection)."""
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array, low, high, pivot):
    """Hoare-style partition of ``array[low:high]`` around ``pivot``.

    Returns the split index: elements left of it are <= pivot-side values.
    """
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array):
    """Sort ``array`` with introsort and return it.

    The recursion-depth budget is 2 * ceil(log2(n)); below 16 elements the
    slice is handed to insertion sort.
    """
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array, start, end, size_threshold, max_depth):
    """Recursive introsort worker over ``array[start:end]``."""
    while end - start > size_threshold:
        if max_depth == 0:
            # Depth budget exhausted: switch to heapsort to guarantee O(n log n).
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input('Enter numbers separated by a comma : ').strip()
    unsorted = [float(item) for item in user_input.split(',')]
    print(sort(unsorted))
76
1
def and_gate(input_1: int, input_2: int) -> int:
    """Logical AND gate: return 1 only when both inputs are non-zero.

    Fix: both functions were renamed to ``a__`` (the second def shadowing the
    first) while the bodies and the ``__main__`` block still called
    ``and_gate``/``test_and_gate`` — a NameError — and the signature repeated
    the parameter name ``A_`` (a SyntaxError). Canonical names restored.
    """
    # The tuple contains a 0 exactly when at least one input is 0.
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    """Exhaustively check the AND truth table."""
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1


if __name__ == "__main__":
    test_and_gate()
    print(and_gate(1, 0))
    print(and_gate(0, 0))
    print(and_gate(0, 1))
    print(and_gate(1, 1))
76
"""Convert TensorFlow MobileNetV1 checkpoints to the Hugging Face format.

Fixes: the three functions were all renamed to ``a__`` while call sites used
``get_mobilenet_va_config``/``prepare_img``/``convert_movilevit_checkpoint``
(NameError); ``parser`` and ``args`` in the ``__main__`` block were bound to an
obfuscated throwaway name but referenced by their real names (NameError); and
the id2label comprehension used an undefined ``A_`` where the loop key ``k``
belongs.
"""
import argparse
import json
import re
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileNetVaConfig,
    MobileNetVaForImageClassification,
    MobileNetVaImageProcessor,
    load_tf_weights_in_mobilenet_va,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_va_config(model_name):
    """Build a MobileNetV1 config, parsing depth multiplier and image size from ``model_name``."""
    config = MobileNetVaConfig(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("""Quantized models are not supported.""")

    matches = re.match(r"""^mobilenet_v1_([^_]*)_([^_]*)$""", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = """imagenet-1k-id2label.json"""
    repo_id = """huggingface/label-files"""
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="""dataset"""), """r"""))
    id2label = {int(k) + 1: v for k, v in id2label.items()}  # shift by 1 for the background class
    id2label[0] = """background"""
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    """Download the standard COCO cats image used to sanity-check conversions."""
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy TF checkpoint weights into the HF model, verify logits, then save (and optionally push)."""
    config = get_mobilenet_va_config(model_name)

    # Load 🤗 model
    model = MobileNetVaForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_va(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetVaImageProcessor(
        crop_size={"""width""": config.image_size, """height""": config.image_size},
        size={"""shortest_edge""": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="""pt""")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    # Reference slices computed from the original TF checkpoints.
    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f'''Saving model {model_name} to {pytorch_dump_folder_path}''')
    model.save_pretrained(pytorch_dump_folder_path)
    print(f'''Saving image processor to {pytorch_dump_folder_path}''')
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("""Pushing to the hub...""")
        repo_id = """google/""" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--model_name',
        default='mobilenet_v1_1.0_224',
        type=str,
        help='Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.',
    )
    parser.add_argument(
        '--checkpoint_path', required=True, type=str, help='Path to the original TensorFlow checkpoint (.ckpt file).'
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
    )
    parser.add_argument(
        '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
76
1
"""Mahalanobis-distance metric for the `datasets` metric API.

Fixes: the three docstring constants were bound to the obfuscated
``__lowerCAmelCase`` while the decorator and ``_info`` referenced
``_DESCRIPTION``/``_CITATION``/``_KWARGS_DESCRIPTION`` (NameError); the two
methods were both named ``_lowercase`` (the second silently overwrote the
first, and neither matched the ``_info``/``_compute`` hooks the framework
calls); and ``_compute`` declared two parameters with the same name, which is
a SyntaxError. Canonical names restored.
"""
import numpy as np

import datasets


_DESCRIPTION = '\nCompute the Mahalanobis Distance\n\nMahalonobis distance is the distance between a point and a distribution.\nAnd not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.\nIt was introduced by Prof. P. C. Mahalanobis in 1936\nand has been used in various statistical applications ever since\n[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]\n'

_CITATION = '\\n@article{de2000mahalanobis,\n title={The mahalanobis distance},\n author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},\n journal={Chemometrics and intelligent laboratory systems},\n volume={50},\n number={1},\n pages={1--18},\n year={2000},\n publisher={Elsevier}\n}\n'

_KWARGS_DESCRIPTION = '\nArgs:\n    X: List of datapoints to be compared with the `reference_distribution`.\n    reference_distribution: List of datapoints from the reference distribution we want to compare to.\nReturns:\n    mahalanobis: The Mahalonobis distance for each datapoint in `X`.\nExamples:\n\n    >>> mahalanobis_metric = datasets.load_metric("mahalanobis")\n    >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])\n    >>> print(results)\n    {\'mahalanobis\': array([0.5])}\n'


@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class UpperCAmelCase_(datasets.Metric):
    """Metric computing the Mahalanobis distance of points to a reference distribution."""

    def _info(self):
        """Declare the metric's metadata and input feature schema."""
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """X""": datasets.Sequence(datasets.Value("""float""", id="""sequence"""), id="""X"""),
                }
            ),
        )

    def _compute(self, X, reference_distribution):
        """Return the Mahalanobis distance of each row of ``X`` to ``reference_distribution``."""
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("""Expected `X` to be a 2D vector""")
        if len(reference_distribution.shape) != 2:
            raise ValueError("""Expected `reference_distribution` to be a 2D vector""")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                """Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"""
            )

        # Get mahalanobis distance for each prediction
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            # Singular covariance: fall back to the Moore-Penrose pseudo-inverse.
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()

        return {"mahalanobis": mahal_dist}
76
"""Utility that checks every ``__init__.py`` keeps its ``_import_structure``
dict and its ``TYPE_CHECKING`` imports in sync, and that all submodules are
registered in the main init.

Fixes: every function was renamed to ``a__`` (each shadowing the last) while
call sites used the real names — ``find_backend``, ``parse_init``,
``analyze_results``, ``check_all_inits``, ``get_transformers_submodules``,
``check_submodules`` — raising NameError; the ``PATH_TO_TRANSFORMERS`` and
``IGNORE_SUBMODULES`` constants were bound to obfuscated names but referenced
by their real names; several regex literals were not raw strings (invalid
escape sequences). Canonical names restored, regexes made raw.
"""
import collections
import importlib.util
import os
import re
from pathlib import Path


PATH_TO_TRANSFORMERS = "src/transformers"

# Matches is_xxx_available()
_re_backend = re.compile(r"is\_([a-z_]*)_available()")
# Catches a one-line _import_struct = {xxx}
_re_one_line_import_struct = re.compile(r"^_import_structure\s+=\s+\{([^\}]+)\}")
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
_re_import_struct_key_value = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
_re_test_backend = re.compile(r"^\s*if\s+not\s+is\_[a-z_]*\_available\(\)")
# Catches a line _import_struct["bla"].append("foo")
_re_import_struct_add_one = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
_re_import_struct_add_many = re.compile(r"^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]")
# Catches a line with an object between quotes and a comma: "MyModel",
_re_quote_object = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
_re_between_brackets = re.compile(r"^\s+\[([^\]]+)\]")
# Catches a line with from foo import bar, bla, boo
_re_import = re.compile(r"\s+from\s+\S*\s+import\s+([^\(\s].*)\n")
# Catches a line with try:
_re_try = re.compile(r"^\s*try:")
# Catches a line with else:
_re_else = re.compile(r"^\s*else:")


def find_backend(line):
    """Return the normalized backend name tested on ``line``, or None.

    E.g. ``"    if not is_torch_available():"`` -> ``"torch"``; multiple
    backends are joined with ``"_and_"`` after sorting.
    """
    if _re_test_backend.search(line) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line)]
    backends.sort()
    return "_and_".join(backends)


def parse_init(init_file):
    """Parse an ``__init__.py`` and return two dicts mapping backend name
    (or "none") to the objects declared in ``_import_structure`` and in the
    ``TYPE_CHECKING`` section respectively, or None for a non-lazy init."""
    with open(init_file, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    line_index = 0
    while line_index < len(lines) and not lines[line_index].startswith("_import_structure = {"):
        line_index += 1

    # If this is a traditional init, just return.
    if line_index >= len(lines):
        return None

    # First grab the objects without a specific backend in _import_structure
    objects = []
    while not lines[line_index].startswith("if TYPE_CHECKING") and find_backend(lines[line_index]) is None:
        line = lines[line_index]
        # If we have everything on a single line, let's deal with it.
        if _re_one_line_import_struct.search(line):
            content = _re_one_line_import_struct.search(line).groups()[0]
            imports = re.findall(r"\[([^\]]+)\]", content)
            for imp in imports:
                objects.extend([obj[1:-1] for obj in imp.split(", ")])
            line_index += 1
            continue
        single_line_import_search = _re_import_struct_key_value.search(line)
        if single_line_import_search is not None:
            imports = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(", ") if len(obj) > 0]
            objects.extend(imports)
        elif line.startswith(" " * 8 + '"'):
            objects.append(line[9:-3])
        line_index += 1

    import_dict_objects = {"none": objects}

    # Let's continue with backend-specific objects in _import_structure
    while not lines[line_index].startswith("if TYPE_CHECKING"):
        # If the line is an if not is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 4):
                line = lines[line_index]
                if _re_import_struct_add_one.search(line) is not None:
                    objects.append(_re_import_struct_add_one.search(line).groups()[0])
                elif _re_import_struct_add_many.search(line) is not None:
                    imports = _re_import_struct_add_many.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_between_brackets.search(line) is not None:
                    imports = _re_between_brackets.search(line).groups()[0].split(", ")
                    imports = [obj[1:-1] for obj in imports if len(obj) > 0]
                    objects.extend(imports)
                elif _re_quote_object.search(line) is not None:
                    objects.append(_re_quote_object.search(line).groups()[0])
                elif line.startswith(" " * 8 + '"'):
                    objects.append(line[9:-3])
                elif line.startswith(" " * 12 + '"'):
                    objects.append(line[13:-3])
                line_index += 1

            import_dict_objects[backend] = objects
        else:
            line_index += 1

    # At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
    objects = []
    while (
        line_index < len(lines)
        and find_backend(lines[line_index]) is None
        and not lines[line_index].startswith("else")
    ):
        line = lines[line_index]
        single_line_import_search = _re_import.search(line)
        if single_line_import_search is not None:
            objects.extend(single_line_import_search.groups()[0].split(", "))
        elif line.startswith(" " * 8):
            objects.append(line[8:-2])
        line_index += 1

    type_hint_objects = {"none": objects}

    # Let's continue with backend-specific objects
    while line_index < len(lines):
        # If the line is an if is_backend_available, we grab all objects associated.
        backend = find_backend(lines[line_index])
        # Check if the backend declaration is inside a try block:
        if _re_try.search(lines[line_index - 1]) is None:
            backend = None

        if backend is not None:
            line_index += 1

            # Scroll until we hit the else block of try-except-else
            while _re_else.search(lines[line_index]) is None:
                line_index += 1

            line_index += 1

            objects = []
            # Until we unindent, add backend objects to the list
            while len(lines[line_index]) <= 1 or lines[line_index].startswith(" " * 8):
                line = lines[line_index]
                single_line_import_search = _re_import.search(line)
                if single_line_import_search is not None:
                    objects.extend(single_line_import_search.groups()[0].split(", "))
                elif line.startswith(" " * 12):
                    objects.append(line[12:-2])
                line_index += 1

            type_hint_objects[backend] = objects
        else:
            line_index += 1

    return import_dict_objects, type_hint_objects


def analyze_results(import_dict_objects, type_hint_objects):
    """Compare the two halves of an init and return a list of error strings."""

    def find_duplicates(seq):
        return [k for k, v in collections.Counter(seq).items() if v > 1]

    if list(import_dict_objects.keys()) != list(type_hint_objects.keys()):
        return ["Both sides of the init do not have the same backends!"]

    errors = []
    for key in import_dict_objects.keys():
        duplicate_imports = find_duplicates(import_dict_objects[key])
        if duplicate_imports:
            errors.append(f'''Duplicate _import_structure definitions for: {duplicate_imports}''')
        duplicate_type_hints = find_duplicates(type_hint_objects[key])
        if duplicate_type_hints:
            errors.append(f'''Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}''')

        if sorted(set(import_dict_objects[key])) != sorted(set(type_hint_objects[key])):
            name = "base imports" if key == "none" else f'''{key} backend'''
            errors.append(f'''Differences for {name}:''')
            for a in type_hint_objects[key]:
                if a not in import_dict_objects[key]:
                    errors.append(f'''  {a} in TYPE_HINT but not in _import_structure.''')
            for a in import_dict_objects[key]:
                if a not in type_hint_objects[key]:
                    errors.append(f'''  {a} in _import_structure but not in TYPE_HINT.''')
    return errors


def check_all_inits():
    """Walk the source tree and raise ValueError listing every inconsistent init."""
    failures = []
    for root, _, files in os.walk(PATH_TO_TRANSFORMERS):
        if "__init__.py" in files:
            fname = os.path.join(root, "__init__.py")
            objects = parse_init(fname)
            if objects is not None:
                errors = analyze_results(*objects)
                if len(errors) > 0:
                    errors[0] = f'''Problem in {fname}, both halves do not define the same objects.\n{errors[0]}'''
                    failures.append("\n".join(errors))
    if len(failures) > 0:
        raise ValueError("\n\n".join(failures))


def get_transformers_submodules():
    """Return the names of all top-level submodules of the package."""
    submodules = []
    for path, directories, files in os.walk(PATH_TO_TRANSFORMERS):
        for folder in directories:
            # Ignore private modules
            if folder.startswith("_"):
                directories.remove(folder)
                continue
            # Ignore leftovers from branches (empty folders apart from pycache)
            if len(list((Path(path) / folder).glob("*.py"))) == 0:
                continue
            short_path = str((Path(path) / folder).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(os.path.sep, ".")
            submodules.append(submodule)
        for fname in files:
            if fname == "__init__.py":
                continue
            short_path = str((Path(path) / fname).relative_to(PATH_TO_TRANSFORMERS))
            submodule = short_path.replace(".py", "").replace(os.path.sep, ".")
            if len(submodule.split(".")) == 1:
                submodules.append(submodule)
    return submodules


IGNORE_SUBMODULES = [
    "convert_pytorch_checkpoint_to_tf2",
    "modeling_flax_pytorch_utils",
]


def check_submodules():
    """Raise ValueError if any submodule is missing from the main init's _import_structure."""
    # This is to make sure the transformers module imported is the one in the repo.
    spec = importlib.util.spec_from_file_location(
        "transformers",
        os.path.join(PATH_TO_TRANSFORMERS, "__init__.py"),
        submodule_search_locations=[PATH_TO_TRANSFORMERS],
    )
    transformers = spec.loader.load_module()

    module_not_registered = [
        module
        for module in get_transformers_submodules()
        if module not in IGNORE_SUBMODULES and module not in transformers._import_structure.keys()
    ]
    if len(module_not_registered) > 0:
        list_of_modules = "\n".join(f'''- {module}''' for module in module_not_registered)
        raise ValueError(
            """The following submodules are not properly registered in the main init of Transformers:\n"""
            f'''{list_of_modules}\n'''
            """Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value."""
        )


if __name__ == "__main__":
    check_all_inits()
    check_submodules()
76
1
import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import YolosConfig, YolosForObjectDetection, YolosImageProcessor from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase : str = logging.get_logger(__name__) def a__ ( A_ ): '''simple docstring''' __magic_name__ = YolosConfig() # size of the architecture if "yolos_ti" in yolos_name: __magic_name__ = 192 __magic_name__ = 768 __magic_name__ = 12 __magic_name__ = 3 __magic_name__ = [800, 1333] __magic_name__ = False elif yolos_name == "yolos_s_dWr": __magic_name__ = 330 __magic_name__ = 14 __magic_name__ = 6 __magic_name__ = 1320 elif "yolos_s" in yolos_name: __magic_name__ = 384 __magic_name__ = 1536 __magic_name__ = 12 __magic_name__ = 6 elif "yolos_b" in yolos_name: __magic_name__ = [800, 1344] __magic_name__ = 91 __magic_name__ = """huggingface/label-files""" __magic_name__ = """coco-detection-id2label.json""" __magic_name__ = json.load(open(hf_hub_download(A_, A_, repo_type="""dataset""" ), """r""" ) ) __magic_name__ = {int(A_ ): v for k, v in idalabel.items()} __magic_name__ = idalabel __magic_name__ = {v: k for k, v in idalabel.items()} return config def a__ ( A_, A_, A_ = False ): '''simple docstring''' for i in range(config.num_hidden_layers ): # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) __magic_name__ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' ) __magic_name__ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict __magic_name__ = in_proj_weight[: config.hidden_size, :] __magic_name__ = in_proj_bias[: config.hidden_size] __magic_name__ = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] __magic_name__ = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] __magic_name__ = in_proj_weight[-config.hidden_size :, :] 
__magic_name__ = in_proj_bias[-config.hidden_size :] def a__ ( A_ ): '''simple docstring''' if "backbone" in name: __magic_name__ = name.replace("""backbone""", """vit""" ) if "cls_token" in name: __magic_name__ = name.replace("""cls_token""", """embeddings.cls_token""" ) if "det_token" in name: __magic_name__ = name.replace("""det_token""", """embeddings.detection_tokens""" ) if "mid_pos_embed" in name: __magic_name__ = name.replace("""mid_pos_embed""", """encoder.mid_position_embeddings""" ) if "pos_embed" in name: __magic_name__ = name.replace("""pos_embed""", """embeddings.position_embeddings""" ) if "patch_embed.proj" in name: __magic_name__ = name.replace("""patch_embed.proj""", """embeddings.patch_embeddings.projection""" ) if "blocks" in name: __magic_name__ = name.replace("""blocks""", """encoder.layer""" ) if "attn.proj" in name: __magic_name__ = name.replace("""attn.proj""", """attention.output.dense""" ) if "attn" in name: __magic_name__ = name.replace("""attn""", """attention.self""" ) if "norm1" in name: __magic_name__ = name.replace("""norm1""", """layernorm_before""" ) if "norm2" in name: __magic_name__ = name.replace("""norm2""", """layernorm_after""" ) if "mlp.fc1" in name: __magic_name__ = name.replace("""mlp.fc1""", """intermediate.dense""" ) if "mlp.fc2" in name: __magic_name__ = name.replace("""mlp.fc2""", """output.dense""" ) if "class_embed" in name: __magic_name__ = name.replace("""class_embed""", """class_labels_classifier""" ) if "bbox_embed" in name: __magic_name__ = name.replace("""bbox_embed""", """bbox_predictor""" ) if "vit.norm" in name: __magic_name__ = name.replace("""vit.norm""", """vit.layernorm""" ) return name def a__ ( A_, A_ ): '''simple docstring''' for key in orig_state_dict.copy().keys(): __magic_name__ = orig_state_dict.pop(A_ ) if "qkv" in key: __magic_name__ = key.split(""".""" ) __magic_name__ = int(key_split[2] ) __magic_name__ = model.vit.encoder.layer[layer_num].attention.attention.all_head_size if "weight" in key: 
__magic_name__ = val[:dim, :] __magic_name__ = val[ dim : dim * 2, : ] __magic_name__ = val[-dim:, :] else: __magic_name__ = val[:dim] __magic_name__ = val[dim : dim * 2] __magic_name__ = val[-dim:] else: __magic_name__ = val return orig_state_dict def a__ ( ): '''simple docstring''' __magic_name__ = """http://images.cocodataset.org/val2017/000000039769.jpg""" __magic_name__ = Image.open(requests.get(A_, stream=A_ ).raw ) return im @torch.no_grad() def a__ ( A_, A_, A_, A_ = False ): '''simple docstring''' __magic_name__ = get_yolos_config(A_ ) # load original state_dict __magic_name__ = torch.load(A_, map_location="""cpu""" )["""model"""] # load 🤗 model __magic_name__ = YolosForObjectDetection(A_ ) model.eval() __magic_name__ = convert_state_dict(A_, A_ ) model.load_state_dict(A_ ) # Check outputs on an image, prepared by YolosImageProcessor __magic_name__ = 800 if yolos_name != """yolos_ti""" else 512 __magic_name__ = YolosImageProcessor(format="""coco_detection""", size=A_ ) __magic_name__ = image_processor(images=prepare_img(), return_tensors="""pt""" ) __magic_name__ = model(**A_ ) __magic_name__ , __magic_name__ = outputs.logits, outputs.pred_boxes __magic_name__ , __magic_name__ = None, None if yolos_name == "yolos_ti": __magic_name__ = torch.tensor( [[-39.5022, -11.9820, -17.6888], [-29.9574, -9.9769, -17.7691], [-42.3281, -20.7200, -30.6294]] ) __magic_name__ = torch.tensor( [[0.4021, 0.0836, 0.7979], [0.0184, 0.2609, 0.0364], [0.1781, 0.2004, 0.2095]] ) elif yolos_name == "yolos_s_200_pre": __magic_name__ = torch.tensor( [[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] ) __magic_name__ = torch.tensor( [[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] ) elif yolos_name == "yolos_s_300_pre": __magic_name__ = torch.tensor( [[-36.2220, -14.4385, -23.5457], [-35.6970, -14.7583, -21.3935], [-31.5939, -13.6042, -16.8049]] ) __magic_name__ = torch.tensor( [[0.7614, 0.2316, 0.4728], 
[0.7168, 0.4495, 0.3855], [0.4996, 0.1466, 0.9996]] ) elif yolos_name == "yolos_s_dWr": __magic_name__ = torch.tensor( [[-42.8668, -24.1049, -41.1690], [-34.7456, -14.1274, -24.9194], [-33.7898, -12.1946, -25.6495]] ) __magic_name__ = torch.tensor( [[0.5587, 0.2773, 0.0605], [0.5004, 0.3014, 0.9994], [0.4999, 0.1548, 0.9994]] ) elif yolos_name == "yolos_base": __magic_name__ = torch.tensor( [[-40.6064, -24.3084, -32.6447], [-55.1990, -30.7719, -35.5877], [-51.4311, -33.3507, -35.6462]] ) __magic_name__ = torch.tensor( [[0.5555, 0.2794, 0.0655], [0.9049, 0.2664, 0.1894], [0.9183, 0.1984, 0.1635]] ) else: raise ValueError(f'''Unknown yolos_name: {yolos_name}''' ) assert torch.allclose(logits[0, :3, :3], A_, atol=1e-4 ) assert torch.allclose(pred_boxes[0, :3, :3], A_, atol=1e-4 ) Path(A_ ).mkdir(exist_ok=A_ ) print(f'''Saving model {yolos_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(A_ ) print(f'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(A_ ) if push_to_hub: __magic_name__ = { """yolos_ti""": """yolos-tiny""", """yolos_s_200_pre""": """yolos-small""", """yolos_s_300_pre""": """yolos-small-300""", """yolos_s_dWr""": """yolos-small-dwr""", """yolos_base""": """yolos-base""", } print("""Pushing to the hub...""" ) __magic_name__ = model_mapping[yolos_name] image_processor.push_to_hub(A_, organization="""hustvl""" ) model.push_to_hub(A_, organization="""hustvl""" ) if __name__ == "__main__": __lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--yolos_name', default='yolos_s_200_pre', type=str, help=( 'Name of the YOLOS model you\'d like to convert. Should be one of \'yolos_ti\', \'yolos_s_200_pre\',' ' \'yolos_s_300_pre\', \'yolos_s_dWr\', \'yolos_base\'.' ), ) parser.add_argument( '--checkpoint_path', default=None, type=str, help='Path to the original state dict (.pth file).' 
) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' ) parser.add_argument( '--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.' ) __lowerCAmelCase : Tuple = parser.parse_args() convert_yolos_checkpoint(args.yolos_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
76
import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __lowerCAmelCase : Optional[Any] = logging.get_logger(__name__) __lowerCAmelCase : List[Any] = { 'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json', # See all SEW-D models at https://huggingface.co/models?filter=sew-d } class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = """sew-d""" def __init__( self : List[str] , UpperCamelCase__ : Tuple=32 , UpperCamelCase__ : Optional[int]=768 , UpperCamelCase__ : Tuple=12 , UpperCamelCase__ : Optional[Any]=12 , UpperCamelCase__ : int=3072 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : List[Any]=512 , UpperCamelCase__ : Any=256 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : str=("p2c", "c2p") , UpperCamelCase__ : List[Any]="layer_norm" , UpperCamelCase__ : int="gelu_python" , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : int=0.0 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : List[Any]=0.02 , UpperCamelCase__ : Optional[int]=1E-7 , UpperCamelCase__ : List[Any]=1E-5 , UpperCamelCase__ : List[str]="group" , UpperCamelCase__ : Optional[int]="gelu" , UpperCamelCase__ : Tuple=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , UpperCamelCase__ : str=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , UpperCamelCase__ : Optional[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , UpperCamelCase__ : Optional[Any]=False , UpperCamelCase__ : Optional[int]=128 , UpperCamelCase__ : Tuple=16 , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : Dict=0.05 , UpperCamelCase__ : str=10 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : Dict=0.0 , UpperCamelCase__ : Dict=10 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : List[Any]="mean" , UpperCamelCase__ : int=False , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : 
Optional[int]=256 , UpperCamelCase__ : List[str]=0 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=2 , **UpperCamelCase__ : str , ) -> Dict: """simple docstring""" super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ ) __magic_name__ = hidden_size __magic_name__ = feat_extract_norm __magic_name__ = feat_extract_activation __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = conv_bias __magic_name__ = num_conv_pos_embeddings __magic_name__ = num_conv_pos_embedding_groups __magic_name__ = len(self.conv_dim ) __magic_name__ = num_hidden_layers __magic_name__ = intermediate_size __magic_name__ = squeeze_factor __magic_name__ = max_position_embeddings __magic_name__ = position_buckets __magic_name__ = share_att_key __magic_name__ = relative_attention __magic_name__ = norm_rel_ebd __magic_name__ = list(UpperCamelCase__ ) __magic_name__ = hidden_act __magic_name__ = num_attention_heads __magic_name__ = hidden_dropout __magic_name__ = attention_dropout __magic_name__ = activation_dropout __magic_name__ = feat_proj_dropout __magic_name__ = final_dropout __magic_name__ = layer_norm_eps __magic_name__ = feature_layer_norm_eps __magic_name__ = initializer_range __magic_name__ = vocab_size if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" F'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)''' F'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' ) # fine-tuning config parameters for SpecAugment: 
https://arxiv.org/abs/1904.08779 __magic_name__ = apply_spec_augment __magic_name__ = mask_time_prob __magic_name__ = mask_time_length __magic_name__ = mask_time_min_masks __magic_name__ = mask_feature_prob __magic_name__ = mask_feature_length __magic_name__ = mask_feature_min_masks # ctc loss __magic_name__ = ctc_loss_reduction __magic_name__ = ctc_zero_infinity # sequence classification __magic_name__ = use_weighted_layer_sum __magic_name__ = classifier_proj_size @property def _lowercase ( self : Union[str, Any] ) -> str: """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1 )
76
1
import numpy as np


def a__(vector: np.ndarray, alpha: float) -> np.ndarray:
    """Exponential Linear Unit (ELU) activation.

    Element-wise: returns ``vector`` where positive, and
    ``alpha * (exp(vector) - 1)`` where non-positive.

    Args:
        vector: input array of any shape.
        alpha: scale for the negative branch.

    Returns:
        Array of the same shape as ``vector``.
    """
    # Original block referenced undefined `vector`/`alpha` and declared the
    # same parameter name twice (a SyntaxError); parameters restored to match
    # the names the body uses.
    return np.where(vector > 0, vector, (alpha * (np.exp(vector) - 1)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
76
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    """Return sigmoid(value), or its derivative when ``deriv`` is True.

    When ``deriv`` is True, ``value`` is assumed to already be a sigmoid
    output, so the derivative is ``value * (1 - value)``.
    """
    # Name restored: the training loop below calls `sigmoid_function`, which
    # the obfuscated source never defined (its def had a duplicate-parameter
    # SyntaxError as well).
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value: learning-rate-like scale used for both the forward pass and
# the weight update.  (Restored name: it is referenced as INITIAL_VALUE below.)
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    """Train a single weight toward ``expected`` (a percentage, 0-100).

    Args:
        expected: target value in percent; the network is trained toward
            ``expected / 100``.
        number_propagations: number of forward/update iterations; must be
            >= 1 (with 0 the prediction is never computed — NameError,
            preserved from the original).

    Returns:
        The final prediction scaled back to 0-100.
    """
    # Random starting weight in [1, 199] (odd values only) — quirk preserved
    # from the original implementation.
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta (gradient through the sigmoid)
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input('Expected value: '))
    number_propagations = int(input('Number of propagations: '))
    print(forward_propagation(expected, number_propagations))
76
1
import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __lowerCAmelCase : Tuple = logging.get_logger(__name__) __lowerCAmelCase : Tuple = '▁' __lowerCAmelCase : Any = {'vocab_file': 'prophetnet.tokenizer'} __lowerCAmelCase : List[str] = { 'vocab_file': { 'microsoft/xprophetnet-large-wiki100-cased': ( 'https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer' ), } } __lowerCAmelCase : str = { 'microsoft/xprophetnet-large-wiki100-cased': {'do_lower_case': False}, } __lowerCAmelCase : Any = { 'microsoft/xprophetnet-large-wiki100-cased': 512, } def a__ ( A_ ): '''simple docstring''' __magic_name__ = collections.OrderedDict() with open(A_, """r""", encoding="""utf-8""" ) as reader: __magic_name__ = reader.readlines() for index, token in enumerate(A_ ): __magic_name__ = token.rstrip("""\n""" ) __magic_name__ = index return vocab class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = VOCAB_FILES_NAMES a__ = PRETRAINED_VOCAB_FILES_MAP a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = ["""input_ids""", """attention_mask"""] def __init__( self : Tuple , UpperCamelCase__ : List[Any] , UpperCamelCase__ : int="[SEP]" , UpperCamelCase__ : Optional[int]="[SEP]" , UpperCamelCase__ : Optional[Any]="[SEP]" , UpperCamelCase__ : Optional[int]="[UNK]" , UpperCamelCase__ : List[Any]="[PAD]" , UpperCamelCase__ : Optional[Any]="[CLS]" , UpperCamelCase__ : Any="[MASK]" , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : Tuple , ) -> None: """simple docstring""" __magic_name__ = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , 
**UpperCamelCase__ , ) try: import sentencepiece as spm except ImportError: logger.warning( """You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece""" """ pip install sentencepiece""" ) raise __magic_name__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(UpperCamelCase__ ) ) __magic_name__ = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab __magic_name__ = {"""[PAD]""": 0, """[CLS]""": 1, """[SEP]""": 2, """[UNK]""": 3, """[MASK]""": 4} for i in range(10 ): __magic_name__ = F'''[unused{i}]''' __magic_name__ = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab __magic_name__ = 12 __magic_name__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(UpperCamelCase__ ) def __getstate__( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __magic_name__ = self.__dict__.copy() __magic_name__ = None return state def __setstate__( self : int , UpperCamelCase__ : str ) -> Optional[int]: """simple docstring""" __magic_name__ = d try: import sentencepiece as spm except ImportError: logger.warning( """You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece""" """ pip install sentencepiece""" ) raise # for backward compatibility if not hasattr(self , """sp_model_kwargs""" ): __magic_name__ = {} __magic_name__ = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def _lowercase ( self : List[str] , 
UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ) -> List[int]: """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ ) if token_ids_a is None: return ([0] * len(UpperCamelCase__ )) + [1] return ([0] * len(UpperCamelCase__ )) + [1] + ([0] * len(UpperCamelCase__ )) + [1] def _lowercase ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" __magic_name__ = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def _lowercase ( self : Optional[int] ) -> str: """simple docstring""" return len(self.sp_model ) + self.fairseq_offset def _lowercase ( self : Dict ) -> List[Any]: """simple docstring""" __magic_name__ = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def _lowercase ( self : List[Any] , UpperCamelCase__ : str ) -> str: """simple docstring""" return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ ) def _lowercase ( self : Optional[int] , UpperCamelCase__ : List[Any] ) -> Dict: """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __magic_name__ = self.sp_model.PieceToId(UpperCamelCase__ ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def _lowercase ( self : int , UpperCamelCase__ : List[str] ) -> List[Any]: """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def _lowercase ( self : Tuple , UpperCamelCase__ : Any ) -> 
Optional[Any]: """simple docstring""" __magic_name__ = """""".join(UpperCamelCase__ ).replace(UpperCamelCase__ , """ """ ).strip() return out_string def _lowercase ( self : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ) -> Tuple[str]: """simple docstring""" if not os.path.isdir(UpperCamelCase__ ): logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' ) return __magic_name__ = os.path.join( UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , UpperCamelCase__ ) elif not os.path.isfile(self.vocab_file ): with open(UpperCamelCase__ , """wb""" ) as fi: __magic_name__ = self.sp_model.serialized_model_proto() fi.write(UpperCamelCase__ ) return (out_vocab_file,) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> List[int]: """simple docstring""" if token_ids_a is None: return token_ids_a + [self.sep_token_id] __magic_name__ = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
76
import os
import sys


# Make the local `src` checkout importable before importing transformers below.
# (Restored name SRC_DIR: the obfuscated source assigned the path to a
# throwaway identifier and then referenced SRC_DIR, a NameError.)
SRC_DIR = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)

from transformers import (
    AutoConfig,
    AutoModel,
    AutoModelForCausalLM,
    AutoModelForMaskedLM,
    AutoModelForQuestionAnswering,
    AutoModelForSequenceClassification,
    AutoTokenizer,
    add_start_docstrings,
)

# Packages torch.hub installs before loading these entry points — the
# `dependencies` list is the standard hubconf convention.
dependencies = [
    'torch',
    'numpy',
    'tokenizers',
    'filelock',
    'requests',
    'tqdm',
    'regex',
    'sentencepiece',
    'sacremoses',
    'importlib_metadata',
    'huggingface_hub',
]

# NOTE(review): the original defined all seven entry points with one colliding
# name (and duplicate *args/**kwargs parameter names, a SyntaxError), so only
# the last definition could have survived.  Names restored to the conventional
# torch.hub entry points — confirm against the upstream hubconf.


@add_start_docstrings(AutoConfig.__doc__)
def config(*args, **kwargs):
    # torch.hub entry point: forward everything to AutoConfig.
    return AutoConfig.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoTokenizer.__doc__)
def tokenizer(*args, **kwargs):
    # torch.hub entry point: forward everything to AutoTokenizer.
    return AutoTokenizer.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModel.__doc__)
def model(*args, **kwargs):
    # torch.hub entry point: forward everything to AutoModel.
    return AutoModel.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForCausalLM.__doc__)
def modelForCausalLM(*args, **kwargs):
    # torch.hub entry point: causal language modeling head.
    return AutoModelForCausalLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForMaskedLM.__doc__)
def modelForMaskedLM(*args, **kwargs):
    # torch.hub entry point: masked language modeling head.
    return AutoModelForMaskedLM.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForSequenceClassification.__doc__)
def modelForSequenceClassification(*args, **kwargs):
    # torch.hub entry point: sequence classification head.
    return AutoModelForSequenceClassification.from_pretrained(*args, **kwargs)


@add_start_docstrings(AutoModelForQuestionAnswering.__doc__)
def modelForQuestionAnswering(*args, **kwargs):
    # torch.hub entry point: extractive question answering head.
    return AutoModelForQuestionAnswering.from_pretrained(*args, **kwargs)
76
1
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

# NOTE(review): module-constant names restored — the obfuscated source bound
# both the logger and the archive map to the same throwaway identifier.
LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json',
}


class UpperCAmelCase_(PretrainedConfig):
    """Configuration for the LXMERT cross-modal (language + vision) model.

    Stores hyper-parameters for the language encoder, the object-relationship
    (vision) encoder, the cross-modality encoder, and the pre-training task
    switches.  The defaults correspond to the published
    unc-nlp/lxmert-base-uncased checkpoint.

    NOTE(review): parameter names restored from the upstream LXMERT config —
    the obfuscated source declared every parameter with the same name (a
    SyntaxError) and inherited from an undefined `_A`; the base is fixed to
    the imported `PretrainedConfig`.
    """

    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        # Text-encoder hyper-parameters.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        # Head sizes for the QA / object / attribute prediction tasks.
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        # Layer counts: language (l), cross-modality (x), relation/vision (r).
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        # Visual-feature input dimensions and pre-training loss scaling.
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        # Pre-training task switches.
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        # Per-encoder layer counts, keyed by encoder name.
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
76
from typing import Dict

from .base import GenericTensor, Pipeline


class UpperCAmelCase_(Pipeline):
    """Feature-extraction pipeline: tokenize text, run the model, and return
    its raw hidden states.

    NOTE(review): the four `_lowercase` methods below look like the standard
    pipeline hooks (`_sanitize_parameters`, `preprocess`, `_forward`,
    `postprocess`) renamed by an obfuscation pass; with a shared name only the
    last definition survives on the class.  Method names are kept as-is here,
    but parameter names are restored — the originals declared every parameter
    with the same name, a SyntaxError.  The undefined base `_A` is fixed to
    the imported `Pipeline`.
    """

    def _lowercase(self, truncation=None, tokenize_kwargs=None, return_tensors=None, **kwargs):
        """Split call-time kwargs into (preprocess, forward, postprocess) dicts."""
        if tokenize_kwargs is None:
            tokenize_kwargs = {}

        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                # Refuse ambiguous input rather than silently preferring one.
                raise ValueError(
                    "truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)"
                )
            tokenize_kwargs["truncation"] = truncation

        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors

        return preprocess_params, {}, postprocess_params

    def _lowercase(self, inputs, **tokenize_kwargs) -> Dict[str, GenericTensor]:
        """Tokenize `inputs` into tensors for the configured framework."""
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors, **tokenize_kwargs)
        return model_inputs

    def _lowercase(self, model_inputs):
        """Run the model on the tokenized inputs."""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def _lowercase(self, model_outputs, return_tensors=False):
        """Return the first output as a raw tensor, or as nested lists."""
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__(self, *args, **kwargs):
        """Extract features for the given input(s); dispatch to the base class."""
        return super().__call__(*args, **kwargs)
76
1
from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. __lowerCAmelCase : Any = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. __lowerCAmelCase : Union[str, Any] = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. __lowerCAmelCase : List[Any] = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1000)) def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = len([g for position, g in enumerate(A_ ) if g == main_target[position]] ) return (item, float(A_ )) def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = random.randint(0, len(A_ ) - 1 ) __magic_name__ = parent_a[:random_slice] + parent_a[random_slice:] __magic_name__ = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def a__ ( A_, A_ ): '''simple docstring''' __magic_name__ = list(A_ ) if random.uniform(0, 1 ) < MUTATION_PROBABILITY: __magic_name__ = random.choice(A_ ) return "".join(A_ ) def a__ ( A_, A_, A_, ): '''simple docstring''' __magic_name__ = [] # Generate more children proportionally to the fitness score. __magic_name__ = int(parent_a[1] * 100 ) + 1 __magic_name__ = 10 if child_n >= 10 else child_n for _ in range(A_ ): __magic_name__ = population_score[random.randint(0, A_ )][0] __magic_name__ , __magic_name__ = crossover(parent_a[0], A_ ) # Append new string to the population list. pop.append(mutate(A_, A_ ) ) pop.append(mutate(A_, A_ ) ) return pop def a__ ( A_, A_, A_ = True ): '''simple docstring''' if N_POPULATION < N_SELECTED: __magic_name__ = f'''{N_POPULATION} must be bigger than {N_SELECTED}''' raise ValueError(A_ ) # Verify that the target contains no genes besides the ones inside genes variable. 
__magic_name__ = sorted({c for c in target if c not in genes} ) if not_in_genes_list: __magic_name__ = f'''{not_in_genes_list} is not in genes list, evolution cannot converge''' raise ValueError(A_ ) # Generate random starting population. __magic_name__ = [] for _ in range(A_ ): population.append("""""".join([random.choice(A_ ) for i in range(len(A_ ) )] ) ) # Just some logs to know what the algorithms is doing. __magic_name__ , __magic_name__ = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(A_ ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. # We just need to call evaluate for every item inside the population. __magic_name__ = [evaluate(A_, A_ ) for item in population] # Check if there is a matching evolution. __magic_name__ = sorted(A_, key=lambda A_ : x[1], reverse=A_ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f'''\nGeneration: {generation}''' f'''\nTotal Population:{total_population}''' f'''\nBest score: {population_score[0][1]}''' f'''\nBest string: {population_score[0][0]}''' ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. __magic_name__ = population[: int(N_POPULATION / 3 )] population.clear() population.extend(A_ ) # Normalize population score to be between 0 and 1. 
__magic_name__ = [ (item, score / len(A_ )) for item, score in population_score ] # This is selection for i in range(A_ ): population.extend(select(population_score[int(A_ )], A_, A_ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(A_ ) > N_POPULATION: break if __name__ == "__main__": __lowerCAmelCase : List[str] = ( 'This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!' ) __lowerCAmelCase : Optional[int] = list( ' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm' 'nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\' ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = basic(target_str, genes_list) print( F'''\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}''' )
76
import argparse
import math
import os
from copy import deepcopy

import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel


# NOTE(review): this module had been identifier-mangled (functions ``a__``,
# locals ``__magic_name__``, constants ``__lowerCAmelCase``, the digit ``1``
# swapped to ``a`` in imported names) while bodies still read the original
# identifiers, so the file failed at import.  Names below are reconstructed
# from those in-body reads and call sites; anything lacking an in-file anchor
# is flagged inline — confirm against the upstream conversion script.

# Released Dance Diffusion checkpoints: download URL plus the sample rate and
# sample length each model was trained with.
MODELS_MAP = {
    'gwf-440k': {
        'url': 'https://model-server.zqevans2.workers.dev/gwf-440k.ckpt',
        'sample_rate': 48000,
        'sample_size': 65536,
    },
    'jmann-small-190k': {
        'url': 'https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt',
        'sample_rate': 48000,
        'sample_size': 65536,
    },
    'jmann-large-580k': {
        'url': 'https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt',
        'sample_rate': 48000,
        'sample_size': 131072,
    },
    'maestro-uncond-150k': {
        'url': 'https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt',
        'sample_rate': 16000,
        'sample_size': 65536,
    },
    'unlocked-uncond-250k': {
        'url': 'https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt',
        'sample_rate': 16000,
        'sample_size': 65536,
    },
    'honk-140k': {
        'url': 'https://model-server.zqevans2.workers.dev/honk-140k.ckpt',
        'sample_rate': 16000,
        'sample_size': 65536,
    },
}


def alpha_sigma_to_t(alpha, sigma):
    """Map an (alpha, sigma) noise pair to a timestep in [0, 1] via atan2."""
    # The mangled source called the non-existent ``torch.atana``; atan2 restored.
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    """Convert linear timesteps ``t`` to the "crash" diffusion schedule."""
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    """Bare attribute bag used as a stand-in model config in ``main``."""

    pass


class DiffusionUncond(nn.Module):
    """Unconditional diffusion model wrapper holding the net and its EMA copy."""

    def __init__(self, global_args):
        super().__init__()
        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        # ``main`` reads ``orig_model.diffusion_ema``, grounding this name.
        self.diffusion_ema = deepcopy(self.diffusion)
        # NOTE(review): attribute name and the ``scramble=True`` flag have no
        # in-file anchor; they follow the upstream script — confirm.
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    """Fetch the released checkpoint for ``model_name`` into the CWD."""
    url = MODELS_MAP[model_name]["url"]
    # Shells out to wget; model_name comes from the trusted MODELS_MAP keys.
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"


# Layer-index maps used by ``rename`` below; which map is which is grounded by
# the guards there (keys 1-6 for int<7, 8-13 for int>7, full set for the mid
# block, 0-6 at depth 0).
DOWN_NUM_TO_LAYER = {
    '1': 'resnets.0',
    '2': 'attentions.0',
    '3': 'resnets.1',
    '4': 'attentions.1',
    '5': 'resnets.2',
    '6': 'attentions.2',
}
UP_NUM_TO_LAYER = {
    '8': 'resnets.0',
    '9': 'attentions.0',
    '10': 'resnets.1',
    '11': 'attentions.1',
    '12': 'resnets.2',
    '13': 'attentions.2',
}
MID_NUM_TO_LAYER = {
    '1': 'resnets.0',
    '2': 'attentions.0',
    '3': 'resnets.1',
    '4': 'attentions.1',
    '5': 'resnets.2',
    '6': 'attentions.2',
    '8': 'resnets.3',
    '9': 'attentions.3',
    '10': 'resnets.4',
    '11': 'attentions.4',
    '12': 'resnets.5',
    '13': 'attentions.5',
}
DEPTH_0_TO_LAYER = {
    '0': 'resnets.0',
    '1': 'resnets.1',
    '2': 'resnets.2',
    '4': 'resnets.0',
    '5': 'resnets.1',
    '6': 'resnets.2',
}
RES_CONV_MAP = {
    'skip': 'conv_skip',
    'main.0': 'conv_1',
    'main.1': 'group_norm_1',
    'main.3': 'conv_2',
    'main.4': 'group_norm_2',
}
ATTN_MAP = {
    'norm': 'group_norm',
    'qkv_proj': ['query', 'key', 'value'],
    'out_proj': ['proj_attn'],
}


def convert_resconv_naming(name):
    """Rename a ResConvBlock sub-parameter to its diffusers equivalent."""
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")
    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    """Rename an attention sub-parameter; a fused qkv fans out to a list."""
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")


def rename(input_string, max_depth=13):
    """Translate one original state-dict key into the diffusers naming scheme.

    Returns a string, or a list of strings when a fused qkv projection is
    split into query/key/value.
    """
    string = input_string
    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")
    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]
    while string.startswith("main.7."):
        depth += 1
        string = string[7:]
    if string.startswith("main."):
        string = string[5:]
    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]
    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")
    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        new_string_left = convert_attn_naming(string_left)
        string_left = new_string_left
    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string


def rename_orig_weights(state_dict):
    """Build a diffusers-keyed state dict from the original model weights."""
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    """Split a fused conv qkv weight/bias across the mapped linear keys."""
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict


def main(args):
    """Convert a Dance Diffusion checkpoint and verify it against the original."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    # NOTE(review): target attribute for the mangled ``= 0`` assignment
    # reconstructed from the upstream script — confirm.
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    # Every renamed key must exist in the diffusers model; the only keys the
    # diffusers model may have beyond that are the weightless "kernel" buffers.
    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())
    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value
    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33
    scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)
    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()
    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"
    print(f"Conversion for {model_name} successful!")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', default=None, type=str, required=True, help='Path to the model to convert.')
    parser.add_argument(
        '--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
    )
    parser.add_argument('--checkpoint_path', default=None, type=str, required=True, help='Path to the output model.')
    args = parser.parse_args()
    main(args)
76
1
from collections import deque

from .hash_table import HashTable


class UpperCAmelCase_(HashTable):
    """Hash table that chains colliding values into a per-slot deque.

    Fixes applied: the base class was the undefined name ``_A`` —
    ``HashTable`` is imported above and otherwise unused, so it is restored.
    All three methods originally shared the mangled name ``_lowercase``
    (shadowing each other); names and parameters below are reconstructed
    from the bodies' own references (``self.values[key]``,
    ``super()._collision_resolution``) — NOTE(review): confirm against the
    sibling ``hash_table`` module.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

    def _set_value(self, key, data):
        """Prepend ``data`` to the bucket for ``key``, creating it on first use."""
        # The mangled original bound the new deque to a throwaway local, so a
        # fresh slot stayed ``None`` and the appendleft below crashed.
        self.values[key] = deque([]) if self.values[key] is None else self.values[key]
        self.values[key].appendleft(data)
        # NOTE(review): assignment target was mangled; mirroring the bucket
        # into the parent's key map is the apparent intent — confirm.
        self._keys[key] = self.values[key]

    def balanced_factor(self):
        """Average spare capacity per slot, scaled by the charge factor."""
        return (
            sum(self.charge_factor - len(slot) for slot in self.values)
            / self.size_table
            * self.charge_factor
        )

    def _collision_resolution(self, key, data=None):
        """Keep ``key`` unless its bucket is full and no slot is free."""
        # NOTE(review): the ``count`` operand was mangled; ``None`` (i.e. no
        # empty slot remains) matches the upstream algorithm — confirm.
        if not (
            len(self.values[key]) == self.charge_factor and self.values.count(None) == 0
        ):
            return key
        return super()._collision_resolution(key, data)
76
from ...configuration_utils import PretrainedConfig
from ...utils import logging


# NOTE(review): both module-level names below were mangled to the same
# identifier (the second clobbers the first); left as-is to avoid changing
# the module surface.  The first is the module logger, the second the
# pretrained config-archive map.
__lowerCAmelCase : Tuple = logging.get_logger(__name__)

__lowerCAmelCase : Tuple = {
    'SCUT-DLVCLab/lilt-roberta-en-base': (
        'https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json'
    ),
}


class UpperCAmelCase_(PretrainedConfig):
    """Configuration for a LiLT (Language-Independent Layout Transformer) model.

    Fixes applied: the base class was the undefined name ``_A`` —
    ``PretrainedConfig`` is imported above and otherwise unused, so it is
    restored; constructor parameter names are grounded by the body's own
    reads (the mangled signature bound every argument to ``UpperCamelCase__``
    while the body used the real names); and the attribute assignments, which
    all targeted one throwaway local and left the config empty, now store
    onto ``self``.
    """

    # NOTE(review): originally ``a__ = "lilt"``; PretrainedConfig subclasses
    # expose this as ``model_type`` — confirm.
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        # NOTE(review): the mangler swapped digits for ``a`` elsewhere in this
        # dump, so ``max_ad_position_embeddings`` is restored to the LiLT
        # layout-coordinate name ``max_2d_position_embeddings`` — confirm.
        self.max_2d_position_embeddings = max_2d_position_embeddings
76
1
def a__(number):
    """Return ``True`` when *number* equals the sum of its proper divisors.

    A perfect number (6, 28, 496, ...) is a positive integer equal to the sum
    of its divisors excluding itself.  Fix: the mangled signature bound the
    argument to ``A_`` while the body read ``number`` (NameError at call time).
    """
    # No proper divisor can exceed number // 2, so only scan that far.
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


# Backward-compatible alias: the ``__main__`` demo below calls ``perfect``,
# which the mangled original left undefined.
perfect = a__


if __name__ == "__main__":
    print('Program to check whether a number is a Perfect number or not...')
    number = int(input('Enter number: ').strip())
    print(F'''{number} is {"" if perfect(number) else "not "}a Perfect Number.''')
76
import json
import os
import tempfile

from transformers.testing_utils import check_json_file_has_correct_format


class UpperCAmelCase_:
    """Mixin of serialization round-trip tests shared by feature-extractor tests.

    Fixes applied: the class attribute and all four methods originally shared
    mangled names (``a__`` / ``_lowercase``), so later definitions shadowed
    earlier ones and only one test survived collection.
    ``feature_extraction_class`` is grounded by the bodies' own
    ``self.feature_extraction_class`` reads; the ``test_*`` method names
    follow the unittest discovery convention — NOTE(review): confirm the
    exact names against the upstream mixin.
    """

    # Concrete test classes must override this with the extractor under test;
    # they are likewise expected to provide ``feat_extract_dict``.
    feature_extraction_class = None

    def test_feat_extract_to_json_string(self):
        """to_json_string() must faithfully serialize every configured field."""
        feat_extract = self.feature_extraction_class(**self.feat_extract_dict)
        obj = json.loads(feat_extract.to_json_string())
        for key, value in self.feat_extract_dict.items():
            self.assertEqual(obj[key], value)

    def test_feat_extract_to_json_file(self):
        """A JSON-file round trip must reproduce an identical extractor."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_feat_extract_from_and_save_pretrained(self):
        """save_pretrained()/from_pretrained() must round-trip the config."""
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict())

    def test_init_without_params(self):
        """The extractor must be constructible with all-default arguments."""
        feat_extract = self.feature_extraction_class()
        self.assertIsNotNone(feat_extract)
76
1
import copy
import random

from transformers import CLIPTokenizer


class UpperCAmelCase_(CLIPTokenizer):
    """CLIP tokenizer that expands one placeholder token into several learned tokens.

    Fixes applied: the base class was the undefined name ``_A``
    (``CLIPTokenizer`` is imported above and otherwise unused); every method
    was mangled to ``_lowercase``, shadowing the others — names are restored
    from the bodies' own call sites (``self.try_adding_tokens``,
    ``self.replace_placeholder_tokens_in_text``).  NOTE(review): the public
    name ``add_placeholder_tokens`` has no in-file call site — confirm.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # placeholder token -> list of concrete vocabulary tokens it expands to
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        """Add one token to the vocabulary, failing loudly on a duplicate."""
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f"The tokenizer already contains the token {placeholder_token}. Please pass a different"
                " `placeholder_token` that is not already in the tokenizer."
            )

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        """Register ``placeholder_token`` backed by ``num_vec_per_token`` vocab entries."""
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f"_{i}"
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f"The tokenizer already has placeholder token {token} that can get confused with"
                    f" {placeholder_token}keep placeholder tokens independent"
                )
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        """Expand every registered placeholder inside ``text`` (str or list of str)."""
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output

        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                # Optionally only load a leading fraction of the vectors.
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, " ".join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load
            ),
            *args,
            **kwargs,
        )
76
from ..utils import DummyObject, requires_backends


class UpperCAmelCase_(metaclass=DummyObject):
    """Import-time stand-in raising a helpful error when ``note_seq`` is absent.

    Fixes applied: the metaclass was the undefined name ``_A`` —
    ``DummyObject`` is imported above and otherwise unused, so it is
    restored.  NOTE(review): the class attribute was mangled to ``a__`` and
    the two classmethods to ``_lowercase``; ``_backends`` plus the
    ``from_config``/``from_pretrained`` pair follow the diffusers
    dummy-object convention — confirm against the generated dummies.
    """

    _backends = ["note_seq"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["note_seq"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["note_seq"])
76
1
# Universal gas constant R in J/(mol*K).  The mangled original bound this to
# a throwaway name while the function below reads ``UNIVERSAL_GAS_CONSTANT``;
# the constant name is restored from that read.
UNIVERSAL_GAS_CONSTANT = 8.314_4598


def a__(temperature, molar_mass):
    """Return the root-mean-square molecular speed, v_rms = sqrt(3RT/M).

    :param temperature: absolute temperature in kelvin (must be >= 0)
    :param molar_mass: molar mass in kg/mol (must be > 0)
    :raises ValueError: on a negative temperature or non-positive molar mass
        (previously a bare ``Exception``; ``ValueError`` is more precise and
        is still caught by any existing ``except Exception`` handler)
    """
    if temperature < 0:
        raise ValueError("Temperature cannot be less than 0 K")
    if molar_mass <= 0:
        raise ValueError("Molar mass cannot be less than or equal to 0 kg/mol")
    return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5


# Backward-compatible alias: the ``__main__`` demo below calls
# ``rms_speed_of_molecule``, which the mangled original left undefined.
rms_speed_of_molecule = a__


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
    # example
    temperature = 300
    molar_mass = 28
    vrms = rms_speed_of_molecule(temperature, molar_mass)
    print(F'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
76
def a__(sentence):
    """Reverse each word of *sentence* that is longer than four characters.

    Fix: the mangled body tested ``len(A_) > 4`` against the whole sentence
    (which would reverse every word whenever the sentence itself was long)
    and read the undefined name ``sentence``; the per-word length test
    restores the behavior the ``__main__`` demo expects
    ('Hey wollef sroirraw' -> 'Hey fellow warriors').
    """
    return " ".join(word[::-1] if len(word) > 4 else word for word in sentence.split())


# Backward-compatible alias: the ``__main__`` demo below calls
# ``reverse_long_words``, which the mangled original left undefined.
reverse_long_words = a__


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(reverse_long_words('Hey wollef sroirraw'))
76
1
from __future__ import annotations


def encode(plain: str) -> list[int]:
    """Encode a lowercase string as letter positions (a=1 ... z=26).

    Fix: all three functions in the mangled original were named ``a__``
    (each shadowing the previous) and their bodies read the undefined names
    ``plain``/``encoded``/``encode``/``decode``; the names are restored from
    those reads and call sites.
    """
    return [ord(elem) - 96 for elem in plain]


def decode(encoded: list[int]) -> str:
    """Inverse of :func:`encode`: map 1..26 back to lowercase letters."""
    return "".join(chr(elem + 96) for elem in encoded)


def main() -> None:
    """Interactive driver: read a line, show its encoding and round trip."""
    encoded = encode(input("-> ").strip().lower())
    print("Encoded: ", encoded)
    print("Decoded:", decode(encoded))


# Preserve the module's historical public name: after the original's three
# shadowing ``def a__`` statements, the surviving binding was the driver.
a__ = main


if __name__ == "__main__":
    main()
76
import os
import unittest

from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class UpperCAmelCase_(TokenizerTesterMixin, unittest.TestCase):
    """Tokenization tests for the Funnel tokenizer.

    Fixes applied: the first base class was the undefined name ``_A``
    (``TokenizerTesterMixin`` is imported above and otherwise unused); class
    attributes and methods originally shared mangled names (``a__`` /
    ``_lowercase``) and shadowed one another.  ``tokenizer_class`` is
    grounded by the ``self.tokenizer_class`` read below and ``setUp`` by its
    ``super().setUp()`` call; the remaining names follow the
    ``TokenizerTesterMixin`` contract — NOTE(review): confirm upstream.
    """

    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()
        # Write a minimal WordPiece vocabulary for the tests to load.
        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        # NOTE(review): the ``do_lower_case`` argument was mangled away;
        # ``False`` matches the upstream test — confirm.
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            # Funnel marks the leading special token with token type 2.
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
76
1
import argparse
import os
import re

import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor

from accelerate import Accelerator


# NOTE(review): this module had been identifier-mangled (functions ``a__``,
# locals ``__magic_name__``) while bodies still read the original names, so
# almost every statement raised NameError.  Names below are reconstructed
# from those in-body reads and call sites; spots without an in-file anchor
# are flagged inline — confirm against the upstream accelerate cv_example.


def extract_label(fname):
    """Derive the class label from a filename shaped like ``<label>_<idx>.jpg``."""
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]


class PetsDataset(Dataset):
    """Image-folder dataset returning ``{"image": tensor, "label": id}`` dicts.

    The base class is restored from the undefined ``_A`` to the imported,
    otherwise-unused ``Dataset``; the class name is grounded by the
    ``PetsDataset(...)`` call sites in ``training_function``.
    """

    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}


def training_function(config, args):
    """Fine-tune a frozen resnet50d's classifier head on the pets data with Accelerate."""
    # Initialize accelerator (with experiment trackers when requested).
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)
    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None
    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        # NOTE(review): the split argument was mangled; ``__file__`` matches upstream.
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)
    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]
    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}
    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]
    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset([file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id)
    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)
    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True
    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)
    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)
    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                        accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                # NOTE(review): the step argument was mangled; upstream logs
                # by overall_step — confirm.
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
                accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()


def main():
    """CLI entry point: parse arguments and launch ``training_function``."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs` and relevent project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)


if __name__ == "__main__":
    main()
76
from collections import deque from .hash_table import HashTable class UpperCAmelCase_ ( _A ): '''simple docstring''' def __init__( self : int , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Optional[Any] ) -> Optional[Any]: """simple docstring""" super().__init__(*UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any ) -> Dict: """simple docstring""" __magic_name__ = deque([] ) if self.values[key] is None else self.values[key] self.values[key].appendleft(UpperCamelCase__ ) __magic_name__ = self.values[key] def _lowercase ( self : List[str] ) -> int: """simple docstring""" return ( sum(self.charge_factor - len(UpperCamelCase__ ) for slot in self.values ) / self.size_table * self.charge_factor ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple=None ) -> str: """simple docstring""" if not ( len(self.values[key] ) == self.charge_factor and self.values.count(UpperCamelCase__ ) == 0 ): return key return super()._collision_resolution(UpperCamelCase__ , UpperCamelCase__ )
76
1
import unittest from typing import Dict, List, Optional, Union import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import BridgeTowerImageProcessor class UpperCAmelCase_ ( unittest.TestCase ): '''simple docstring''' def __init__( self : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : int = 32 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = [0.48145466, 0.4578275, 0.40821073] , UpperCamelCase__ : Optional[Union[float, List[float]]] = [0.26862954, 0.26130258, 0.27577711] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Any=7 , UpperCamelCase__ : List[Any]=30 , UpperCamelCase__ : int=400 , UpperCamelCase__ : List[Any]=3 , ) -> str: """simple docstring""" __magic_name__ = parent __magic_name__ = do_resize __magic_name__ = size if size is not None else {"""shortest_edge""": 288} __magic_name__ = size_divisor __magic_name__ = do_rescale __magic_name__ = rescale_factor __magic_name__ = do_normalize __magic_name__ = do_center_crop __magic_name__ = image_mean __magic_name__ = image_std __magic_name__ = do_pad __magic_name__ = batch_size __magic_name__ = num_channels __magic_name__ = min_resolution __magic_name__ = max_resolution def _lowercase ( self : List[Any] ) -> Optional[int]: """simple docstring""" return { "image_mean": self.image_mean, "image_std": self.image_std, "do_normalize": self.do_normalize, "do_resize": self.do_resize, "size": self.size, "size_divisor": self.size_divisor, } def _lowercase ( self : List[str] 
, UpperCamelCase__ : List[str] , UpperCamelCase__ : str=False ) -> Any: """simple docstring""" if not batched: __magic_name__ = self.size["""shortest_edge"""] __magic_name__ = image_inputs[0] if isinstance(UpperCamelCase__ , Image.Image ): __magic_name__ , __magic_name__ = image.size else: __magic_name__ , __magic_name__ = image.shape[1], image.shape[2] __magic_name__ = size / min(UpperCamelCase__ , UpperCamelCase__ ) if h < w: __magic_name__ , __magic_name__ = size, scale * w else: __magic_name__ , __magic_name__ = scale * h, size __magic_name__ = int((1333 / 800) * size ) if max(UpperCamelCase__ , UpperCamelCase__ ) > max_size: __magic_name__ = max_size / max(UpperCamelCase__ , UpperCamelCase__ ) __magic_name__ = newh * scale __magic_name__ = neww * scale __magic_name__ , __magic_name__ = int(newh + 0.5 ), int(neww + 0.5 ) __magic_name__ , __magic_name__ = ( newh // self.size_divisor * self.size_divisor, neww // self.size_divisor * self.size_divisor, ) else: __magic_name__ = [] for image in image_inputs: __magic_name__ , __magic_name__ = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) __magic_name__ = max(UpperCamelCase__ , key=lambda UpperCamelCase__ : item[0] )[0] __magic_name__ = max(UpperCamelCase__ , key=lambda UpperCamelCase__ : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class UpperCAmelCase_ ( _A , unittest.TestCase ): '''simple docstring''' a__ = BridgeTowerImageProcessor if is_vision_available() else None def _lowercase ( self : str ) -> Dict: """simple docstring""" __magic_name__ = BridgeTowerImageProcessingTester(self ) @property def _lowercase ( self : List[str] ) -> Optional[Any]: """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def _lowercase ( self : Tuple ) -> Dict: """simple docstring""" __magic_name__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase__ , """image_mean""" ) 
) self.assertTrue(hasattr(UpperCamelCase__ , """image_std""" ) ) self.assertTrue(hasattr(UpperCamelCase__ , """do_normalize""" ) ) self.assertTrue(hasattr(UpperCamelCase__ , """do_resize""" ) ) self.assertTrue(hasattr(UpperCamelCase__ , """size""" ) ) self.assertTrue(hasattr(UpperCamelCase__ , """size_divisor""" ) ) def _lowercase ( self : Union[str, Any] ) -> int: """simple docstring""" pass def _lowercase ( self : Optional[int] ) -> Tuple: """simple docstring""" __magic_name__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images __magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , Image.Image ) # Test not batched input __magic_name__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __magic_name__ , __magic_name__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __magic_name__ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values __magic_name__ , __magic_name__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase ( self : str ) -> str: """simple docstring""" __magic_name__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors __magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , numpify=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , np.ndarray ) # Test not batched input __magic_name__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values 
__magic_name__ , __magic_name__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __magic_name__ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values __magic_name__ , __magic_name__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def _lowercase ( self : Tuple ) -> Union[str, Any]: """simple docstring""" __magic_name__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors __magic_name__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase__ , torchify=UpperCamelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCamelCase__ , torch.Tensor ) # Test not batched input __magic_name__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values __magic_name__ , __magic_name__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched __magic_name__ = image_processing(UpperCamelCase__ , return_tensors="""pt""" ).pixel_values __magic_name__ , __magic_name__ = self.image_processor_tester.get_expected_values(UpperCamelCase__ , batched=UpperCamelCase__ ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , )
76
# Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType from ...utils.imports import is_botoa_available from .config_args import SageMakerConfig from .config_utils import ( DYNAMO_BACKENDS, _ask_field, _ask_options, _convert_dynamo_backend, _convert_mixed_precision, _convert_sagemaker_distributed_mode, _convert_yes_no_to_bool, ) if is_botoa_available(): import botoa # noqa: F401 def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ {"""Effect""": """Allow""", """Principal""": {"""Service""": """sagemaker.amazonaws.com"""}, """Action""": """sts:AssumeRole"""} ], } try: # create the role, associated with the chosen trust policy iam_client.create_role( RoleName=A_, AssumeRolePolicyDocument=json.dumps(A_, indent=2 ) ) __magic_name__ = { """Version""": """2012-10-17""", """Statement""": [ { """Effect""": """Allow""", """Action""": [ """sagemaker:*""", """ecr:GetDownloadUrlForLayer""", """ecr:BatchGetImage""", """ecr:BatchCheckLayerAvailability""", """ecr:GetAuthorizationToken""", """cloudwatch:PutMetricData""", """cloudwatch:GetMetricData""", """cloudwatch:GetMetricStatistics""", """cloudwatch:ListMetrics""", """logs:CreateLogGroup""", 
"""logs:CreateLogStream""", """logs:DescribeLogStreams""", """logs:PutLogEvents""", """logs:GetLogEvents""", """s3:CreateBucket""", """s3:ListBucket""", """s3:GetBucketLocation""", """s3:GetObject""", """s3:PutObject""", ], """Resource""": """*""", } ], } # attach policy to role iam_client.put_role_policy( RoleName=A_, PolicyName=f'''{role_name}_policy_permission''', PolicyDocument=json.dumps(A_, indent=2 ), ) except iam_client.exceptions.EntityAlreadyExistsException: print(f'''role {role_name} already exists. Using existing one''' ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = botoa.client("""iam""" ) return iam_client.get_role(RoleName=A_ )["Role"]["Arn"] def a__ ( ): '''simple docstring''' __magic_name__ = _ask_options( """How do you want to authorize?""", ["""AWS Profile""", """Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) """], A_, ) __magic_name__ = None if credentials_configuration == 0: __magic_name__ = _ask_field("""Enter your AWS Profile name: [default] """, default="""default""" ) __magic_name__ = aws_profile else: print( """Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,""" """`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`""" ) __magic_name__ = _ask_field("""AWS Access Key ID: """ ) __magic_name__ = aws_access_key_id __magic_name__ = _ask_field("""AWS Secret Access Key: """ ) __magic_name__ = aws_secret_access_key __magic_name__ = _ask_field("""Enter your AWS Region: [us-east-1]""", default="""us-east-1""" ) __magic_name__ = aws_region __magic_name__ = _ask_options( """Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?""", ["""Provide IAM Role name""", """Create new IAM role using credentials"""], A_, ) if role_management == 0: __magic_name__ = _ask_field("""Enter your IAM role name: """ ) else: __magic_name__ = """accelerate_sagemaker_execution_role""" print(f'''Accelerate will create an iam role "{iam_role_name}" 
using the provided credentials''' ) _create_iam_role_for_sagemaker(A_ ) __magic_name__ = _ask_field( """Do you want to use custom Docker image? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_custom_docker_image: __magic_name__ = _ask_field("""Enter your Docker image: """, lambda A_ : str(A_ ).lower() ) __magic_name__ = _ask_field( """Do you want to provide SageMaker input channels with data locations? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_inputs_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_field( """Do you want to enable SageMaker metrics? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = None if is_sagemaker_metrics_enabled: __magic_name__ = _ask_field( """Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): """, lambda A_ : str(A_ ).lower(), ) __magic_name__ = _ask_options( """What is the distributed mode?""", ["""No distributed training""", """Data parallelism"""], _convert_sagemaker_distributed_mode, ) __magic_name__ = {} __magic_name__ = _ask_field( """Do you wish to optimize your script with torch dynamo?[yes/NO]:""", _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_dynamo: __magic_name__ = """dynamo_""" __magic_name__ = _ask_options( """Which dynamo backend would you like to use?""", [x.lower() for x in DYNAMO_BACKENDS], _convert_dynamo_backend, default=2, ) __magic_name__ = _ask_field( """Do you want to customize the defaults sent to torch.compile? 
[yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) if use_custom_options: __magic_name__ = _ask_options( """Which mode do you want to use?""", A_, lambda A_ : TORCH_DYNAMO_MODES[int(A_ )], default="""default""", ) __magic_name__ = _ask_field( """Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = _ask_field( """Do you want to enable dynamic shape tracing? [yes/NO]: """, _convert_yes_no_to_bool, default=A_, error_message="""Please enter yes or no.""", ) __magic_name__ = """Which EC2 instance type you want to use for your training?""" if distributed_type != SageMakerDistributedType.NO: __magic_name__ = _ask_options( A_, A_, lambda A_ : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(A_ )] ) else: eca_instance_query += "? [ml.p3.2xlarge]:" __magic_name__ = _ask_field(A_, lambda A_ : str(A_ ).lower(), default="""ml.p3.2xlarge""" ) __magic_name__ = 1 if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL): __magic_name__ = _ask_field( """How many machines do you want use? [1]: """, A_, default=1, ) __magic_name__ = _ask_options( """Do you wish to use FP16 or BF16 (mixed precision)?""", ["""no""", """fp16""", """bf16""", """fp8"""], _convert_mixed_precision, ) if use_dynamo and mixed_precision == "no": print( """Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.""" ) return SageMakerConfig( image_uri=A_, compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER, distributed_type=A_, use_cpu=A_, dynamo_config=A_, eca_instance_type=A_, profile=A_, region=A_, iam_role_name=A_, mixed_precision=A_, num_machines=A_, sagemaker_inputs_file=A_, sagemaker_metrics_file=A_, )
76
1
# flake8: noqa # Lint as: python3 __lowerCAmelCase : Tuple = [ 'VerificationMode', 'Version', 'disable_progress_bar', 'enable_progress_bar', 'is_progress_bar_enabled', 'experimental', ] from .info_utils import VerificationMode from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled from .version import Version from .experimental import experimental
76
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging __lowerCAmelCase : Dict = logging.get_logger(__name__) if is_vision_available(): import PIL class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = ["""pixel_values"""] def __init__( self : Optional[Any] , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = True , **UpperCamelCase__ : int , ) -> None: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = size if size is not None else {"""shortest_edge""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ , param_name="""crop_size""" ) __magic_name__ = do_resize __magic_name__ = size __magic_name__ = resample __magic_name__ = do_center_crop __magic_name__ = crop_size __magic_name__ = do_rescale __magic_name__ = rescale_factor __magic_name__ = do_normalize __magic_name__ = image_mean if image_mean is not None else 
OPENAI_CLIP_MEAN __magic_name__ = image_std if image_std is not None else OPENAI_CLIP_STD __magic_name__ = do_convert_rgb def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __magic_name__ = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__ ) return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Optional[Any] , ) -> Optional[int]: """simple docstring""" return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Dict , ) -> np.ndarray: """simple docstring""" return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : List[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Optional[ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : Dict , ) -> PIL.Image.Image: """simple docstring""" __magic_name__ = do_resize if do_resize is not None else self.do_resize __magic_name__ = size if size is not None else self.size __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""size""" , default_to_square=UpperCamelCase__ ) __magic_name__ = resample if resample is not None else self.resample 
__magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__ = crop_size if crop_size is not None else self.crop_size __magic_name__ = get_size_dict(UpperCamelCase__ , param_name="""crop_size""" , default_to_square=UpperCamelCase__ ) __magic_name__ = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ = image_mean if image_mean is not None else self.image_mean __magic_name__ = image_std if image_std is not None else self.image_std __magic_name__ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb __magic_name__ = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: __magic_name__ = [convert_to_rgb(UpperCamelCase__ ) for image in images] # All transformations expect numpy arrays. 
__magic_name__ = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_resize: __magic_name__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images] if do_center_crop: __magic_name__ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images] if do_rescale: __magic_name__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images] if do_normalize: __magic_name__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images] __magic_name__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images] __magic_name__ = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
76
1
from argparse import ArgumentParser, Namespace from typing import Any, List, Optional from ..pipelines import Pipeline, get_supported_tasks, pipeline from ..utils import logging from . import BaseTransformersCLICommand try: from fastapi import Body, FastAPI, HTTPException from fastapi.routing import APIRoute from pydantic import BaseModel from starlette.responses import JSONResponse from uvicorn import run __lowerCAmelCase : Tuple = True except (ImportError, AttributeError): __lowerCAmelCase : Union[str, Any] = object def a__ ( *A_, **A_ ): '''simple docstring''' pass __lowerCAmelCase : int = False __lowerCAmelCase : List[Any] = logging.get_logger('transformers-cli/serving') def a__ ( A_ ): '''simple docstring''' __magic_name__ = pipeline( task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, ) return ServeCommand(A_, args.host, args.port, args.workers ) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = 42 class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = 42 a__ = 42 class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = 42 class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = 42 class UpperCAmelCase_ ( _A ): '''simple docstring''' @staticmethod def _lowercase ( UpperCamelCase__ : ArgumentParser ) -> Union[str, Any]: """simple docstring""" __magic_name__ = parser.add_parser( """serve""" , help="""CLI tool to run inference requests through REST and GraphQL endpoints.""" ) serve_parser.add_argument( """--task""" , type=UpperCamelCase__ , choices=get_supported_tasks() , help="""The task to run the pipeline on""" , ) serve_parser.add_argument("""--host""" , type=UpperCamelCase__ , default="""localhost""" , help="""Interface the server will listen on.""" ) serve_parser.add_argument("""--port""" , type=UpperCamelCase__ , default=8888 , help="""Port the serving will listen to.""" ) serve_parser.add_argument("""--workers""" , type=UpperCamelCase__ , default=1 , help="""Number 
of http workers""" ) serve_parser.add_argument("""--model""" , type=UpperCamelCase__ , help="""Model's name or path to stored model.""" ) serve_parser.add_argument("""--config""" , type=UpperCamelCase__ , help="""Model's config name or path to stored model.""" ) serve_parser.add_argument("""--tokenizer""" , type=UpperCamelCase__ , help="""Tokenizer name to use.""" ) serve_parser.add_argument( """--device""" , type=UpperCamelCase__ , default=-1 , help="""Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)""" , ) serve_parser.set_defaults(func=UpperCamelCase__ ) def __init__( self : Optional[Any] , UpperCamelCase__ : Pipeline , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> Tuple: """simple docstring""" __magic_name__ = pipeline __magic_name__ = host __magic_name__ = port __magic_name__ = workers if not _serve_dependencies_installed: raise RuntimeError( """Using serve command requires FastAPI and uvicorn. """ """Please install transformers with [serving]: pip install \"transformers[serving]\".""" """Or install FastAPI and uvicorn separately.""" ) else: logger.info(F'''Serving model over {host}:{port}''' ) __magic_name__ = FastAPI( routes=[ APIRoute( """/""" , self.model_info , response_model=UpperCamelCase__ , response_class=UpperCamelCase__ , methods=["""GET"""] , ), APIRoute( """/tokenize""" , self.tokenize , response_model=UpperCamelCase__ , response_class=UpperCamelCase__ , methods=["""POST"""] , ), APIRoute( """/detokenize""" , self.detokenize , response_model=UpperCamelCase__ , response_class=UpperCamelCase__ , methods=["""POST"""] , ), APIRoute( """/forward""" , self.forward , response_model=UpperCamelCase__ , response_class=UpperCamelCase__ , methods=["""POST"""] , ), ] , timeout=600 , ) def _lowercase ( self : Tuple ) -> str: """simple docstring""" run(self._app , host=self.host , port=self.port , workers=self.workers ) def _lowercase ( self : int ) -> List[Any]: """simple docstring""" return 
ServeModelInfoResult(infos=vars(self._pipeline.model.config ) ) def _lowercase ( self : List[Any] , UpperCamelCase__ : str = Body(UpperCamelCase__ , embed=UpperCamelCase__ ) , UpperCamelCase__ : bool = Body(UpperCamelCase__ , embed=UpperCamelCase__ ) ) -> int: """simple docstring""" try: __magic_name__ = self._pipeline.tokenizer.tokenize(UpperCamelCase__ ) if return_ids: __magic_name__ = self._pipeline.tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) return ServeTokenizeResult(tokens=UpperCamelCase__ , tokens_ids=UpperCamelCase__ ) else: return ServeTokenizeResult(tokens=UpperCamelCase__ ) except Exception as e: raise HTTPException(status_code=500 , detail={"""model""": """""", """error""": str(UpperCamelCase__ )} ) def _lowercase ( self : List[Any] , UpperCamelCase__ : List[int] = Body(UpperCamelCase__ , embed=UpperCamelCase__ ) , UpperCamelCase__ : bool = Body(UpperCamelCase__ , embed=UpperCamelCase__ ) , UpperCamelCase__ : bool = Body(UpperCamelCase__ , embed=UpperCamelCase__ ) , ) -> List[Any]: """simple docstring""" try: __magic_name__ = self._pipeline.tokenizer.decode(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return ServeDeTokenizeResult(model="""""" , text=UpperCamelCase__ ) except Exception as e: raise HTTPException(status_code=500 , detail={"""model""": """""", """error""": str(UpperCamelCase__ )} ) async def _lowercase ( self : List[str] , UpperCamelCase__ : Optional[Any]=Body(UpperCamelCase__ , embed=UpperCamelCase__ ) ) -> Dict: """simple docstring""" if len(UpperCamelCase__ ) == 0: return ServeForwardResult(output=[] , attention=[] ) try: # Forward through the model __magic_name__ = self._pipeline(UpperCamelCase__ ) return ServeForwardResult(output=UpperCamelCase__ ) except Exception as e: raise HTTPException(500 , {"""error""": str(UpperCamelCase__ )} )
76
# Test suite for the Nystromformer model (config sanity, per-head-model shape
# checks, and slow integration tests against the uw-madison/nystromformer-512
# checkpoint).
#
# NOTE(review): identifiers in this chunk appear machine-mangled — every binding
# is `__magic_name__`, parameter lists repeat `UpperCamelCase__` (a duplicate-
# argument SyntaxError), methods are collapsed to `_lowercase`, and base classes
# to `_A`. The code as written does not parse/run; comments below describe the
# apparent intent. Confirm against the pristine upstream source before use.
import unittest

from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import (
        NystromformerForMaskedLM,
        NystromformerForMultipleChoice,
        NystromformerForQuestionAnswering,
        NystromformerForSequenceClassification,
        NystromformerForTokenClassification,
        NystromformerModel,
    )
    from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST


class UpperCAmelCase_ :
    """Helper that builds tiny Nystromformer configs/inputs for the tests below.

    NOTE(review): the constructor binds each argument to the same local name
    instead of `self.<attr>`, yet later methods read `self.batch_size` etc. —
    these look like mangled attribute assignments.
    """

    def __init__( self : Any , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple=13 , UpperCamelCase__ : Dict=7 , UpperCamelCase__ : Dict=True , UpperCamelCase__ : List[str]=True , UpperCamelCase__ : Optional[int]=True , UpperCamelCase__ : int=True , UpperCamelCase__ : Optional[int]=99 , UpperCamelCase__ : List[Any]=32 , UpperCamelCase__ : Any=5 , UpperCamelCase__ : List[Any]=4 , UpperCamelCase__ : str=37 , UpperCamelCase__ : Any="gelu" , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : Tuple=0.1 , UpperCamelCase__ : Dict=512 , UpperCamelCase__ : Union[str, Any]=16 , UpperCamelCase__ : Dict=2 , UpperCamelCase__ : Any=0.02 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Dict=4 , UpperCamelCase__ : List[Any]=None , ) -> Union[str, Any]:
        """Store the (tiny) model hyper-parameters used by every check."""
        __magic_name__ = parent
        __magic_name__ = batch_size
        __magic_name__ = seq_length
        __magic_name__ = is_training
        __magic_name__ = use_input_mask
        __magic_name__ = use_token_type_ids
        __magic_name__ = use_labels
        __magic_name__ = vocab_size
        __magic_name__ = hidden_size
        __magic_name__ = num_hidden_layers
        __magic_name__ = num_attention_heads
        __magic_name__ = intermediate_size
        __magic_name__ = hidden_act
        __magic_name__ = hidden_dropout_prob
        __magic_name__ = attention_probs_dropout_prob
        __magic_name__ = max_position_embeddings
        __magic_name__ = type_vocab_size
        __magic_name__ = type_sequence_label_size
        __magic_name__ = initializer_range
        __magic_name__ = num_labels
        __magic_name__ = num_choices
        __magic_name__ = scope

    def _lowercase ( self : Any ) -> Any:
        """Build random input ids, masks and labels plus a config (apparently
        `prepare_config_and_inputs`)."""
        __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        __magic_name__ = None
        if self.use_input_mask:
            __magic_name__ = random_attention_mask([self.batch_size, self.seq_length] )
        __magic_name__ = None
        if self.use_token_type_ids:
            __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        __magic_name__ = None
        __magic_name__ = None
        __magic_name__ = None
        if self.use_labels:
            __magic_name__ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            __magic_name__ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            __magic_name__ = ids_tensor([self.batch_size] , self.num_choices )
        __magic_name__ = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def _lowercase ( self : Tuple ) -> Any:
        """Return a NystromformerConfig built from the stored hyper-parameters."""
        return NystromformerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , )

    def _lowercase ( self : Any , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : int , UpperCamelCase__ : str , UpperCamelCase__ : Dict , UpperCamelCase__ : str ) -> Tuple:
        """Forward the bare model (with/without masks) and check hidden-state shape."""
        __magic_name__ = NystromformerModel(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
        __magic_name__ = model(UpperCamelCase__ , token_type_ids=UpperCamelCase__ )
        __magic_name__ = model(UpperCamelCase__ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )

    def _lowercase ( self : Optional[int] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] ) -> str:
        """Check the masked-LM head emits (batch, seq, vocab) logits."""
        __magic_name__ = NystromformerForMaskedLM(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def _lowercase ( self : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Any , UpperCamelCase__ : Any ) -> Optional[Any]:
        """Check the QA head emits per-token start/end logits."""
        __magic_name__ = NystromformerForQuestionAnswering(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , start_positions=UpperCamelCase__ , end_positions=UpperCamelCase__ , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def _lowercase ( self : Optional[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[str] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict , UpperCamelCase__ : Any ) -> Optional[int]:
        """Check the sequence-classification head emits (batch, num_labels) logits."""
        __magic_name__ = self.num_labels
        __magic_name__ = NystromformerForSequenceClassification(UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def _lowercase ( self : Any , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : List[str] , UpperCamelCase__ : str , UpperCamelCase__ : Any ) -> Dict:
        """Check the token-classification head emits (batch, seq, num_labels) logits."""
        __magic_name__ = self.num_labels
        __magic_name__ = NystromformerForTokenClassification(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        __magic_name__ = model(UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : int , UpperCamelCase__ : int ) -> Optional[Any]:
        """Check the multiple-choice head: inputs are tiled per choice, logits are
        (batch, num_choices)."""
        __magic_name__ = self.num_choices
        __magic_name__ = NystromformerForMultipleChoice(config=UpperCamelCase__ )
        model.to(UpperCamelCase__ )
        model.eval()
        # Tile each (batch, seq) tensor to (batch, num_choices, seq).
        __magic_name__ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __magic_name__ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __magic_name__ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        __magic_name__ = model(
            UpperCamelCase__ , attention_mask=UpperCamelCase__ , token_type_ids=UpperCamelCase__ , labels=UpperCamelCase__ , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def _lowercase ( self : int ) -> List[Any]:
        """Unpack prepare_config_and_inputs into (config, inputs_dict) for the
        common-test mixin."""
        __magic_name__ = self.prepare_config_and_inputs()
        ( ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ( __magic_name__ ) , ) = config_and_inputs
        __magic_name__ = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict


# NOTE(review): `_A` is undefined here (and listed twice as a base) — upstream
# these bases are presumably ModelTesterMixin and PipelineTesterMixin; verify.
@require_torch
class UpperCAmelCase_ ( _A , _A , unittest.TestCase ):
    """Common model tests wired to the tester above."""

    a__ = (
        (
            NystromformerModel,
            NystromformerForMaskedLM,
            NystromformerForMultipleChoice,
            NystromformerForQuestionAnswering,
            NystromformerForSequenceClassification,
            NystromformerForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    a__ = (
        {
            """feature-extraction""": NystromformerModel,
            """fill-mask""": NystromformerForMaskedLM,
            """question-answering""": NystromformerForQuestionAnswering,
            """text-classification""": NystromformerForSequenceClassification,
            """token-classification""": NystromformerForTokenClassification,
            """zero-shot""": NystromformerForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    a__ = False
    a__ = False

    def _lowercase ( self : Union[str, Any] ) -> Union[str, Any]:
        """setUp: create the model tester and a ConfigTester.

        NOTE(review): `NystromformerModelTester` is referenced but the tester
        class above is named `UpperCAmelCase_` — name-mangling artifact.
        """
        __magic_name__ = NystromformerModelTester(self )
        __magic_name__ = ConfigTester(self , config_class=UpperCamelCase__ , hidden_size=37 )

    def _lowercase ( self : Tuple ) -> Any:
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()

    def _lowercase ( self : Optional[Any] ) -> Any:
        """Shape-check the bare model."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def _lowercase ( self : Optional[Any] ) -> int:
        """Shape-check the bare model under each position-embedding type."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            __magic_name__ = type
            self.model_tester.create_and_check_model(*UpperCamelCase__ )

    def _lowercase ( self : List[Any] ) -> List[Any]:
        """Shape-check the masked-LM head."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*UpperCamelCase__ )

    def _lowercase ( self : Union[str, Any] ) -> str:
        """Shape-check the multiple-choice head."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*UpperCamelCase__ )

    def _lowercase ( self : Dict ) -> List[Any]:
        """Shape-check the question-answering head."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*UpperCamelCase__ )

    def _lowercase ( self : str ) -> int:
        """Shape-check the sequence-classification head."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*UpperCamelCase__ )

    def _lowercase ( self : List[Any] ) -> List[str]:
        """Shape-check the token-classification head."""
        __magic_name__ = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*UpperCamelCase__ )

    @slow
    def _lowercase ( self : str ) -> Tuple:
        """Smoke-test loading the first published checkpoint from the hub."""
        for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            __magic_name__ = NystromformerModel.from_pretrained(UpperCamelCase__ )
            self.assertIsNotNone(UpperCamelCase__ )


@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
    """Slow integration tests against the uw-madison/nystromformer-512 checkpoint."""

    @slow
    def _lowercase ( self : Union[str, Any] ) -> Optional[Any]:
        """Forward a fixed 6-token input and compare output shape plus a 3x3
        slice of hidden states against golden values."""
        __magic_name__ = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" )
        __magic_name__ = torch.tensor([[0, 1, 2, 3, 4, 5]] )
        with torch.no_grad():
            __magic_name__ = model(UpperCamelCase__ )[0]
        __magic_name__ = torch.Size((1, 6, 768) )
        self.assertEqual(output.shape , UpperCamelCase__ )
        __magic_name__ = torch.tensor(
            [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , UpperCamelCase__ , atol=1E-4 ) )

    @slow
    def _lowercase ( self : int ) -> str:
        """End-to-end masked-LM check: the [MASK] in the prompt should decode
        to "capital"."""
        __magic_name__ = """the [MASK] of Belgium is Brussels"""
        __magic_name__ = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" )
        __magic_name__ = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" )
        __magic_name__ = tokenizer(UpperCamelCase__ , return_tensors="""pt""" )
        with torch.no_grad():
            __magic_name__ = model(encoding.input_ids ).logits
        # Token index 2 is the [MASK] position in this prompt's encoding.
        __magic_name__ = token_logits[:, 2, :].argmax(-1 )[0]
        self.assertEqual(tokenizer.decode(UpperCamelCase__ ) , """capital""" )
76
1
# SentencePiece-based XLM-RoBERTa tokenizer (fairseq-compatible id layout).
#
# NOTE(review): identifiers are machine-mangled (`__magic_name__` bindings,
# duplicate `UpperCamelCase__` parameters — a SyntaxError — methods collapsed
# to `_lowercase`, base class `_A` undefined). Comments describe apparent
# intent; confirm against the pristine upstream source.
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


__lowerCAmelCase : Dict = logging.get_logger(__name__)

# SentencePiece word-boundary marker (used when detokenizing).
__lowerCAmelCase : Dict = '▁'

__lowerCAmelCase : Tuple = {'vocab_file': 'sentencepiece.bpe.model'}

# Hub URLs for the published sentencepiece models.
__lowerCAmelCase : List[Any] = {
    'vocab_file': {
        'xlm-roberta-base': 'https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model',
        'xlm-roberta-large': 'https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model',
        'xlm-roberta-large-finetuned-conll02-dutch': (
            'https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'
        ),
        'xlm-roberta-large-finetuned-conll02-spanish': (
            'https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'
        ),
        'xlm-roberta-large-finetuned-conll03-english': (
            'https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'
        ),
        'xlm-roberta-large-finetuned-conll03-german': (
            'https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'
        ),
    }
}

# Max sequence lengths per checkpoint.
__lowerCAmelCase : Optional[int] = {
    'xlm-roberta-base': 512,
    'xlm-roberta-large': 512,
    'xlm-roberta-large-finetuned-conll02-dutch': 512,
    'xlm-roberta-large-finetuned-conll02-spanish': 512,
    'xlm-roberta-large-finetuned-conll03-english': 512,
    'xlm-roberta-large-finetuned-conll03-german': 512,
}


class UpperCAmelCase_ ( _A ):
    """Slow (SentencePiece) tokenizer mimicking fairseq's XLM-R id layout."""

    a__ = VOCAB_FILES_NAMES
    a__ = PRETRAINED_VOCAB_FILES_MAP
    a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a__ = ["""input_ids""", """attention_mask"""]

    def __init__( self : List[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : List[str]="<s>" , UpperCamelCase__ : Tuple="</s>" , UpperCamelCase__ : List[str]="</s>" , UpperCamelCase__ : int="<s>" , UpperCamelCase__ : Any="<unk>" , UpperCamelCase__ : Any="<pad>" , UpperCamelCase__ : Optional[Any]="<mask>" , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : Union[str, Any] , ) -> None:
        """Load the sentencepiece model and set up the fairseq id offset tables."""
        # Mask token behaves like a normal word: include the preceding space.
        __magic_name__ = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
        __magic_name__ = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
        __magic_name__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(str(UpperCamelCase__ ) )
        __magic_name__ = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        __magic_name__ = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        __magic_name__ = 1
        __magic_name__ = len(self.sp_model ) + self.fairseq_offset
        __magic_name__ = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__( self : Tuple ) -> Optional[Any]:
        """Pickle support: drop the C++ processor, keep its serialized proto."""
        __magic_name__ = self.__dict__.copy()
        __magic_name__ = None
        __magic_name__ = self.sp_model.serialized_model_proto()
        return state

    def __setstate__( self : List[Any] , UpperCamelCase__ : Dict ) -> int:
        """Unpickle support: rebuild the processor from the serialized proto."""
        __magic_name__ = d

        # for backward compatibility
        if not hasattr(self , """sp_model_kwargs""" ):
            __magic_name__ = {}

        __magic_name__ = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto )

    def _lowercase ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> List[int]:
        """Add XLM-R special tokens: <s> X </s> (single) or <s> A </s></s> B </s> (pair)."""
        if token_ids_a is None:
            return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        __magic_name__ = [self.cls_token_id]
        __magic_name__ = [self.sep_token_id]
        return cls + token_ids_a + sep + sep + token_ids_a + sep

    def _lowercase ( self : Optional[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ) -> List[int]:
        """Return a mask with 1 at special-token positions, 0 elsewhere."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
        if token_ids_a is None:
            return [1] + ([0] * len(UpperCamelCase__ )) + [1]
        return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ )) + [1]

    def _lowercase ( self : str , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> List[int]:
        """Token-type ids are all zeros — XLM-R does not use token types."""
        __magic_name__ = [self.sep_token_id]
        __magic_name__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]

    @property
    def _lowercase ( self : List[str] ) -> str:
        """Vocab size = spm pieces + fairseq offset + 1 for <mask>."""
        return len(self.sp_model ) + self.fairseq_offset + 1  # Add the <mask> token

    def _lowercase ( self : List[Any] ) -> Union[str, Any]:
        """Return the full token->id mapping, including added tokens."""
        __magic_name__ = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def _lowercase ( self : Any , UpperCamelCase__ : str ) -> List[str]:
        """Tokenize text into sentencepiece string pieces."""
        return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )

    def _lowercase ( self : Any , UpperCamelCase__ : List[str] ) -> List[str]:
        """Map a piece to its fairseq-aligned id."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        __magic_name__ = self.sp_model.PieceToId(UpperCamelCase__ )
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : Any ) -> Union[str, Any]:
        """Map a fairseq-aligned id back to its piece."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset )

    def _lowercase ( self : Union[str, Any] , UpperCamelCase__ : str ) -> Optional[Any]:
        """Join pieces and turn the ▁ boundary marker back into spaces."""
        __magic_name__ = """""".join(UpperCamelCase__ ).replace(UpperCamelCase__ , """ """ ).strip()
        return out_string

    def _lowercase ( self : List[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ) -> Tuple[str]:
        """Copy (or re-serialize) the sentencepiece model into save_directory."""
        if not os.path.isdir(UpperCamelCase__ ):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        __magic_name__ = os.path.join(
            UpperCamelCase__ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , UpperCamelCase__ )
        elif not os.path.isfile(self.vocab_file ):
            with open(UpperCamelCase__ , """wb""" ) as fi:
                __magic_name__ = self.sp_model.serialized_model_proto()
                fi.write(UpperCamelCase__ )
        return (out_vocab_file,)
76
# Configuration class for the CvT (Convolutional vision Transformer) model.
#
# NOTE(review): identifiers are machine-mangled — every assignment binds the
# local `__magic_name__` instead of `self.<attr>`, so no configuration
# attribute is ever stored, and the duplicated `UpperCamelCase__` parameter
# names are a SyntaxError. Also note the mutable list default arguments, which
# would be shared across calls if the signature were repaired. Confirm against
# the pristine upstream source.
from ...configuration_utils import PretrainedConfig
from ...utils import logging


__lowerCAmelCase : Tuple = logging.get_logger(__name__)

__lowerCAmelCase : Union[str, Any] = {
    'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class UpperCAmelCase_ ( _A ):
    """CvT model configuration; per-stage hyper-parameters are 3-element lists
    (one entry per stage)."""

    a__ = """cvt"""

    def __init__( self : Dict , UpperCamelCase__ : Optional[int]=3 , UpperCamelCase__ : List[Any]=[7, 3, 3] , UpperCamelCase__ : Any=[4, 2, 2] , UpperCamelCase__ : Optional[Any]=[2, 1, 1] , UpperCamelCase__ : Union[str, Any]=[64, 192, 384] , UpperCamelCase__ : Dict=[1, 3, 6] , UpperCamelCase__ : Any=[1, 2, 10] , UpperCamelCase__ : List[str]=[4.0, 4.0, 4.0] , UpperCamelCase__ : Dict=[0.0, 0.0, 0.0] , UpperCamelCase__ : Tuple=[0.0, 0.0, 0.0] , UpperCamelCase__ : Optional[Any]=[0.0, 0.0, 0.1] , UpperCamelCase__ : str=[True, True, True] , UpperCamelCase__ : Optional[Any]=[False, False, True] , UpperCamelCase__ : Union[str, Any]=["dw_bn", "dw_bn", "dw_bn"] , UpperCamelCase__ : List[Any]=[3, 3, 3] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : Optional[int]=[2, 2, 2] , UpperCamelCase__ : Any=[1, 1, 1] , UpperCamelCase__ : List[str]=[1, 1, 1] , UpperCamelCase__ : int=0.02 , UpperCamelCase__ : int=1E-12 , **UpperCamelCase__ : int , ) -> Dict:
        """Forward generic kwargs to PretrainedConfig, then record the CvT
        hyper-parameters (patch embedding, attention projection, dropout,
        initialization)."""
        super().__init__(**UpperCamelCase__ )
        # NOTE(review): each of the following should presumably be a
        # `self.<name> = <name>` attribute assignment.
        __magic_name__ = num_channels
        __magic_name__ = patch_sizes
        __magic_name__ = patch_stride
        __magic_name__ = patch_padding
        __magic_name__ = embed_dim
        __magic_name__ = num_heads
        __magic_name__ = depth
        __magic_name__ = mlp_ratio
        __magic_name__ = attention_drop_rate
        __magic_name__ = drop_rate
        __magic_name__ = drop_path_rate
        __magic_name__ = qkv_bias
        __magic_name__ = cls_token
        __magic_name__ = qkv_projection_method
        __magic_name__ = kernel_qkv
        __magic_name__ = padding_kv
        __magic_name__ = stride_kv
        __magic_name__ = padding_q
        __magic_name__ = stride_q
        __magic_name__ = initializer_range
        __magic_name__ = layer_norm_eps
76
1
# Fast (tokenizers-backed) ConvBERT tokenizer.
#
# NOTE(review): identifiers are machine-mangled (`__magic_name__` bindings,
# duplicate `UpperCamelCase__` parameters — a SyntaxError, base class `_A`
# undefined). Comments describe apparent intent; confirm against upstream.
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer


__lowerCAmelCase : List[str] = logging.get_logger(__name__)

__lowerCAmelCase : Union[str, Any] = {'vocab_file': 'vocab.txt'}

# Hub URLs for the published vocab files.
__lowerCAmelCase : str = {
    'vocab_file': {
        'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt',
        'YituTech/conv-bert-medium-small': (
            'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt'
        ),
        'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt',
    }
}

# Max sequence lengths per checkpoint.
__lowerCAmelCase : List[str] = {
    'YituTech/conv-bert-base': 512,
    'YituTech/conv-bert-medium-small': 512,
    'YituTech/conv-bert-small': 512,
}

# Default init kwargs per checkpoint.
__lowerCAmelCase : Tuple = {
    'YituTech/conv-bert-base': {'do_lower_case': True},
    'YituTech/conv-bert-medium-small': {'do_lower_case': True},
    'YituTech/conv-bert-small': {'do_lower_case': True},
}


class UpperCAmelCase_ ( _A ):
    """Fast ConvBERT tokenizer (BERT-style WordPiece with a Rust backend)."""

    a__ = VOCAB_FILES_NAMES
    a__ = PRETRAINED_VOCAB_FILES_MAP
    a__ = PRETRAINED_INIT_CONFIGURATION
    a__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    a__ = ConvBertTokenizer

    def __init__( self : str , UpperCamelCase__ : Optional[int]=None , UpperCamelCase__ : List[Any]=None , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Optional[Any]="[UNK]" , UpperCamelCase__ : Optional[Any]="[SEP]" , UpperCamelCase__ : Tuple="[PAD]" , UpperCamelCase__ : Any="[CLS]" , UpperCamelCase__ : int="[MASK]" , UpperCamelCase__ : Any=True , UpperCamelCase__ : Optional[Any]=None , **UpperCamelCase__ : Dict , ) -> List[Any]:
        """Initialize the backend tokenizer and rebuild its normalizer if the
        stored normalizer options disagree with the requested ones."""
        super().__init__(
            UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , do_lower_case=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , tokenize_chinese_chars=UpperCamelCase__ , strip_accents=UpperCamelCase__ , **UpperCamelCase__ , )
        __magic_name__ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get("""lowercase""" , UpperCamelCase__ ) != do_lower_case
            or normalizer_state.get("""strip_accents""" , UpperCamelCase__ ) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""" , UpperCamelCase__ ) != tokenize_chinese_chars
        ):
            # Re-instantiate the normalizer class with the requested options.
            __magic_name__ = getattr(UpperCamelCase__ , normalizer_state.pop("""type""" ) )
            __magic_name__ = do_lower_case
            __magic_name__ = strip_accents
            __magic_name__ = tokenize_chinese_chars
            __magic_name__ = normalizer_class(**UpperCamelCase__ )
        __magic_name__ = do_lower_case

    def _lowercase ( self : int , UpperCamelCase__ : int , UpperCamelCase__ : str=None ) -> Optional[int]:
        """Add BERT special tokens: [CLS] A [SEP] (plus B [SEP] for pairs)."""
        __magic_name__ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_a:
            output += token_ids_a + [self.sep_token_id]
        return output

    def _lowercase ( self : List[Any] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ) -> List[int]:
        """Token-type ids: 0 over [CLS] A [SEP], 1 over B [SEP]."""
        __magic_name__ = [self.sep_token_id]
        __magic_name__ = [self.cls_token_id]
        if token_ids_a is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]

    def _lowercase ( self : Tuple , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ) -> Tuple[str]:
        """Save the backend vocabulary files and return their paths."""
        __magic_name__ = self._tokenizer.model.save(UpperCamelCase__ , name=UpperCamelCase__ )
        return tuple(UpperCamelCase__ )
76
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available __lowerCAmelCase : List[str] = { 'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'], 'tokenization_canine': ['CanineTokenizer'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __lowerCAmelCase : Optional[Any] = [ 'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST', 'CanineForMultipleChoice', 'CanineForQuestionAnswering', 'CanineForSequenceClassification', 'CanineForTokenClassification', 'CanineLayer', 'CanineModel', 'CaninePreTrainedModel', 'load_tf_weights_in_canine', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys __lowerCAmelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
76
1
# Script converting timm ViT/DeiT checkpoints to the Hugging Face ViT format.
#
# NOTE(review): identifiers are machine-mangled — every function is named
# `a__` (each definition shadowing the previous), parameters are duplicated
# `A_` (a SyntaxError), and every binding is `__magic_name__` while bodies
# read meaningful names (`rename_keys`, `config`, ...). Comments describe
# apparent intent; confirm against the pristine upstream script.
import argparse
import json
from pathlib import Path

import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging


logging.set_verbosity_info()
__lowerCAmelCase : Any = logging.get_logger(__name__)


def a__ ( A_, A_=False ):
    """Build the (timm key, HF key) rename table (apparently `create_rename_keys`)."""
    __magic_name__ = []
    for i in range(config.num_hidden_layers ):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f'''blocks.{i}.norm1.weight''', f'''vit.encoder.layer.{i}.layernorm_before.weight''') )
        rename_keys.append((f'''blocks.{i}.norm1.bias''', f'''vit.encoder.layer.{i}.layernorm_before.bias''') )
        rename_keys.append((f'''blocks.{i}.attn.proj.weight''', f'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
        rename_keys.append((f'''blocks.{i}.attn.proj.bias''', f'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
        rename_keys.append((f'''blocks.{i}.norm2.weight''', f'''vit.encoder.layer.{i}.layernorm_after.weight''') )
        rename_keys.append((f'''blocks.{i}.norm2.bias''', f'''vit.encoder.layer.{i}.layernorm_after.bias''') )
        rename_keys.append((f'''blocks.{i}.mlp.fc1.weight''', f'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
        rename_keys.append((f'''blocks.{i}.mlp.fc1.bias''', f'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
        rename_keys.append((f'''blocks.{i}.mlp.fc2.weight''', f'''vit.encoder.layer.{i}.output.dense.weight''') )
        rename_keys.append((f'''blocks.{i}.mlp.fc2.bias''', f'''vit.encoder.layer.{i}.output.dense.bias''') )

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("""cls_token""", """vit.embeddings.cls_token"""),
            ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
            ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
            ("""pos_embed""", """vit.embeddings.position_embeddings"""),
        ] )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("""norm.weight""", """layernorm.weight"""),
                ("""norm.bias""", """layernorm.bias"""),
                ("""pre_logits.fc.weight""", """pooler.dense.weight"""),
                ("""pre_logits.fc.bias""", """pooler.dense.bias"""),
            ] )

        # if just the base model, we should remove "vit" from all keys that start with "vit"
        __magic_name__ = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("""norm.weight""", """vit.layernorm.weight"""),
                ("""norm.bias""", """vit.layernorm.bias"""),
                ("""head.weight""", """classifier.weight"""),
                ("""head.bias""", """vit.layernorm.bias"""),
            ] ) if False else rename_keys.extend(
            [
                ("""norm.weight""", """vit.layernorm.weight"""),
                ("""norm.bias""", """vit.layernorm.bias"""),
                ("""head.weight""", """classifier.weight"""),
                ("""head.bias""", """classifier.bias"""),
            ] )
    return rename_keys


def a__ ( A_, A_, A_=False ):
    """Split timm's fused qkv projection into separate q/k/v weights
    (apparently `read_in_q_k_v`)."""
    for i in range(config.num_hidden_layers ):
        if base_model:
            __magic_name__ = """"""
        else:
            __magic_name__ = """vit."""
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        __magic_name__ = state_dict.pop(f'''blocks.{i}.attn.qkv.weight''' )
        __magic_name__ = state_dict.pop(f'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        __magic_name__ = in_proj_weight[
            : config.hidden_size, :
        ]
        __magic_name__ = in_proj_bias[: config.hidden_size]
        __magic_name__ = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        __magic_name__ = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        __magic_name__ = in_proj_weight[
            -config.hidden_size :, :
        ]
        __magic_name__ = in_proj_bias[-config.hidden_size :]


def a__ ( A_ ):
    """Drop the classification head from a state dict
    (apparently `remove_classification_head_`)."""
    __magic_name__ = ["""head.weight""", """head.bias"""]
    for k in ignore_keys:
        state_dict.pop(A_, A_ )


def a__ ( A_, A_, A_ ):
    """Rename one state-dict key in place (apparently `rename_key`)."""
    __magic_name__ = dct.pop(A_ )
    __magic_name__ = val


def a__ ( ):
    """Download the standard COCO test image (apparently `prepare_img`)."""
    __magic_name__ = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    __magic_name__ = Image.open(requests.get(A_, stream=A_ ).raw )
    return im


@torch.no_grad()
def a__ ( A_, A_ ):
    """Convert a named timm ViT/DeiT checkpoint to HF format, verify outputs on
    a test image, and save model + image processor
    (apparently `convert_vit_checkpoint`)."""
    # define default ViT configuration
    __magic_name__ = ViTConfig()
    __magic_name__ = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        __magic_name__ = True
        __magic_name__ = int(vit_name[-12:-10] )
        __magic_name__ = int(vit_name[-9:-6] )
    else:
        __magic_name__ = 1000
        __magic_name__ = """huggingface/label-files"""
        __magic_name__ = """imagenet-1k-id2label.json"""
        __magic_name__ = json.load(open(hf_hub_download(A_, A_, repo_type="""dataset""" ), """r""" ) )
        __magic_name__ = {int(A_ ): v for k, v in idalabel.items()}
        __magic_name__ = idalabel
        __magic_name__ = {v: k for k, v in idalabel.items()}
        __magic_name__ = int(vit_name[-6:-4] )
        __magic_name__ = int(vit_name[-3:] )
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith("""tiny""" ):
            __magic_name__ = 192
            __magic_name__ = 768
            __magic_name__ = 12
            __magic_name__ = 3
        elif vit_name[9:].startswith("""small""" ):
            __magic_name__ = 384
            __magic_name__ = 1536
            __magic_name__ = 12
            __magic_name__ = 6
        else:
            pass
    else:
        if vit_name[4:].startswith("""small""" ):
            __magic_name__ = 768
            __magic_name__ = 2304
            __magic_name__ = 8
            __magic_name__ = 8
        elif vit_name[4:].startswith("""base""" ):
            pass
        elif vit_name[4:].startswith("""large""" ):
            __magic_name__ = 1024
            __magic_name__ = 4096
            __magic_name__ = 24
            __magic_name__ = 16
        elif vit_name[4:].startswith("""huge""" ):
            __magic_name__ = 1280
            __magic_name__ = 5120
            __magic_name__ = 32
            __magic_name__ = 16

    # load original model from timm
    __magic_name__ = timm.create_model(A_, pretrained=A_ )
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    __magic_name__ = timm_model.state_dict()
    if base_model:
        remove_classification_head_(A_ )
    __magic_name__ = create_rename_keys(A_, A_ )
    for src, dest in rename_keys:
        rename_key(A_, A_, A_ )
    read_in_q_k_v(A_, A_, A_ )

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        __magic_name__ = ViTModel(A_ ).eval()
    else:
        __magic_name__ = ViTForImageClassification(A_ ).eval()
    model.load_state_dict(A_ )

    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        __magic_name__ = DeiTImageProcessor(size=config.image_size )
    else:
        __magic_name__ = ViTImageProcessor(size=config.image_size )
    __magic_name__ = image_processor(images=prepare_img(), return_tensors="""pt""" )
    __magic_name__ = encoding["""pixel_values"""]
    __magic_name__ = model(A_ )

    if base_model:
        __magic_name__ = timm_model.forward_features(A_ )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(A_, outputs.pooler_output, atol=1e-3 )
    else:
        __magic_name__ = timm_model(A_ )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(A_, outputs.logits, atol=1e-3 )

    Path(A_ ).mkdir(exist_ok=A_ )
    print(f'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(A_ )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(A_ )


if __name__ == "__main__":
    __lowerCAmelCase : List[Any] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--vit_name', default='vit_base_patch16_224', type=str, help='Name of the ViT timm model you\'d like to convert.', )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.' )

    __lowerCAmelCase : Optional[Any] = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
76
import argparse import torch from transformers import ( WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForAudioFrameClassification, WavaVecaForSequenceClassification, WavaVecaForXVector, logging, ) logging.set_verbosity_info() __lowerCAmelCase : str = logging.get_logger(__name__) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForSequenceClassification.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""projector.weight"""] __magic_name__ = downstream_dict["""projector.bias"""] __magic_name__ = downstream_dict["""model.post_net.linear.weight"""] __magic_name__ = downstream_dict["""model.post_net.linear.bias"""] return model def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForAudioFrameClassification.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""model.linear.weight"""] __magic_name__ = downstream_dict["""model.linear.bias"""] return model def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = WavaVecaForXVector.from_pretrained(A_, config=A_ ) __magic_name__ = downstream_dict["""connector.weight"""] __magic_name__ = downstream_dict["""connector.bias"""] for i, kernel_size in enumerate(hf_config.tdnn_kernel ): __magic_name__ = downstream_dict[ f'''model.framelevel_feature_extractor.module.{i}.kernel.weight''' ] __magic_name__ = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias'''] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.weight"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear1.bias"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.weight"""] __magic_name__ = downstream_dict["""model.utterancelevel_feature_extractor.linear2.bias"""] __magic_name__ = downstream_dict["""objective.W"""] return model @torch.no_grad() def a__ ( A_, A_, A_, A_ ): '''simple docstring''' __magic_name__ = torch.load(A_, map_location="""cpu""" ) 
__magic_name__ = checkpoint["""Downstream"""] __magic_name__ = WavaVecaConfig.from_pretrained(A_ ) __magic_name__ = WavaVecaFeatureExtractor.from_pretrained( A_, return_attention_mask=A_, do_normalize=A_ ) __magic_name__ = hf_config.architectures[0] if arch.endswith("""ForSequenceClassification""" ): __magic_name__ = convert_classification(A_, A_, A_ ) elif arch.endswith("""ForAudioFrameClassification""" ): __magic_name__ = convert_diarization(A_, A_, A_ ) elif arch.endswith("""ForXVector""" ): __magic_name__ = convert_xvector(A_, A_, A_ ) else: raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' ) if hf_config.use_weighted_layer_sum: __magic_name__ = checkpoint["""Featurizer"""]["""weights"""] hf_feature_extractor.save_pretrained(A_ ) hf_model.save_pretrained(A_ ) if __name__ == "__main__": __lowerCAmelCase : Optional[Any] = argparse.ArgumentParser() parser.add_argument( '--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.' ) parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.') parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.') parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.') __lowerCAmelCase : str = parser.parse_args() convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
76
1
from __future__ import annotations __lowerCAmelCase : Tuple = [True] * 1000001 __lowerCAmelCase : int = 2 while i * i <= 1000000: if seive[i]: for j in range(i * i, 1000001, i): __lowerCAmelCase : Union[str, Any] = False i += 1 def a__ ( A_ ): '''simple docstring''' return seive[n] def a__ ( A_ ): '''simple docstring''' return any(digit in """02468""" for digit in str(A_ ) ) def a__ ( A_ = 1000000 ): '''simple docstring''' __magic_name__ = [2] # result already includes the number 2. for num in range(3, limit + 1, 2 ): if is_prime(A_ ) and not contains_an_even_digit(A_ ): __magic_name__ = str(A_ ) __magic_name__ = [int(str_num[j:] + str_num[:j] ) for j in range(len(A_ ) )] if all(is_prime(A_ ) for i in list_nums ): result.append(A_ ) return result def a__ ( ): '''simple docstring''' return len(find_circular_primes() ) if __name__ == "__main__": print(F'''{len(find_circular_primes()) = }''')
76
import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def a__ ( A_, A_ ): '''simple docstring''' assert isinstance(A_, A_ ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""", [False, True] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ = TextDatasetReader(A_, cache_dir=A_, keep_in_memory=A_ ).read() _check_text_dataset(A_, A_ ) @pytest.mark.parametrize( """features""", [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ], ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = features.copy() if features else default_expected_features __magic_name__ = ( Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ = TextDatasetReader(A_, features=A_, cache_dir=A_ ).read() _check_text_dataset(A_, A_ ) @pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_, split=A_ ).read() _check_text_dataset(A_, A_ ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""", [str, list] ) def a__ ( A_, A_, A_ ): '''simple docstring''' if issubclass(A_, A_ ): 
__magic_name__ = text_path elif issubclass(A_, A_ ): __magic_name__ = [text_path] __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_ ).read() _check_text_dataset(A_, A_ ) def a__ ( A_, A_, A_=("train",) ): '''simple docstring''' assert isinstance(A_, A_ ) for split in splits: __magic_name__ = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""", [False, True] ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): __magic_name__ = TextDatasetReader({"""train""": text_path}, cache_dir=A_, keep_in_memory=A_ ).read() _check_text_datasetdict(A_, A_ ) @pytest.mark.parametrize( """features""", [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ], ) def a__ ( A_, A_, A_ ): '''simple docstring''' __magic_name__ = tmp_path / """cache""" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" __magic_name__ = {"""text""": """string"""} __magic_name__ = features.copy() if features else default_expected_features __magic_name__ = ( Features({feature: Value(A_ ) for feature, dtype in features.items()} ) if features is not None else None ) __magic_name__ = TextDatasetReader({"""train""": text_path}, features=A_, cache_dir=A_ ).read() _check_text_datasetdict(A_, A_ ) @pytest.mark.parametrize("""split""", [None, NamedSplit("""train""" ), """train""", """test"""] ) def a__ ( A_, A_, A_ ): '''simple docstring''' if split: __magic_name__ = {split: text_path} else: __magic_name__ = """train""" __magic_name__ = {"""train""": 
text_path, """test""": text_path} __magic_name__ = tmp_path / """cache""" __magic_name__ = {"""text""": """string"""} __magic_name__ = TextDatasetReader(A_, cache_dir=A_ ).read() _check_text_datasetdict(A_, A_, splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
76
1
from ..utils import DummyObject, requires_backends class UpperCAmelCase_ ( metaclass=_A ): '''simple docstring''' a__ = ["""torch""", """scipy"""] def __init__( self : Optional[int] , *UpperCamelCase__ : List[str] , **UpperCamelCase__ : int ) -> Any: """simple docstring""" requires_backends(self , ["""torch""", """scipy"""] ) @classmethod def _lowercase ( cls : Dict , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : Any ) -> List[Any]: """simple docstring""" requires_backends(cls , ["""torch""", """scipy"""] ) @classmethod def _lowercase ( cls : int , *UpperCamelCase__ : Union[str, Any] , **UpperCamelCase__ : List[Any] ) -> Union[str, Any]: """simple docstring""" requires_backends(cls , ["""torch""", """scipy"""] )
76
from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging __lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) class UpperCAmelCase_ ( _A ): '''simple docstring''' a__ = ["""pixel_values"""] def __init__( self : str , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Dict[str, int]] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : List[Any] , ) -> None: """simple docstring""" super().__init__(**UpperCamelCase__ ) __magic_name__ = size if size is not None else {"""shortest_edge""": 256} __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = crop_size if crop_size is not None else {"""height""": 224, """width""": 224} __magic_name__ = get_size_dict(UpperCamelCase__ ) __magic_name__ = do_resize __magic_name__ = size __magic_name__ = resample __magic_name__ = do_center_crop __magic_name__ = crop_size __magic_name__ = do_rescale __magic_name__ = rescale_factor __magic_name__ = do_normalize __magic_name__ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN __magic_name__ = image_std if image_std is not None else IMAGENET_STANDARD_STD def _lowercase ( self : Any , UpperCamelCase__ 
: np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Tuple , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) __magic_name__ = get_resize_output_image_size(UpperCamelCase__ , size=size["""shortest_edge"""] , default_to_square=UpperCamelCase__ ) return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : str , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ) -> np.ndarray: """simple docstring""" __magic_name__ = get_size_dict(UpperCamelCase__ ) return center_crop(UpperCamelCase__ , size=(size["""height"""], size["""width"""]) , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Tuple , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : float , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Any ) -> np.ndarray: """simple docstring""" return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : List[str] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ) -> np.ndarray: """simple docstring""" return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ ) def _lowercase ( self : Optional[Any] , UpperCamelCase__ : 
ImageInput , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[float] = None , UpperCamelCase__ : Optional[bool] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **UpperCamelCase__ : int , ) -> Dict: """simple docstring""" __magic_name__ = do_resize if do_resize is not None else self.do_resize __magic_name__ = size if size is not None else self.size __magic_name__ = get_size_dict(UpperCamelCase__ , default_to_square=UpperCamelCase__ ) __magic_name__ = resample if resample is not None else self.resample __magic_name__ = do_center_crop if do_center_crop is not None else self.do_center_crop __magic_name__ = crop_size if crop_size is not None else self.crop_size __magic_name__ = get_size_dict(UpperCamelCase__ ) __magic_name__ = do_rescale if do_rescale is not None else self.do_rescale __magic_name__ = rescale_factor if rescale_factor is not None else self.rescale_factor __magic_name__ = do_normalize if do_normalize is not None else self.do_normalize __magic_name__ = image_mean if image_mean is not None else self.image_mean __magic_name__ = image_std if image_std is not None else self.image_std __magic_name__ = make_list_of_images(UpperCamelCase__ ) if not valid_images(UpperCamelCase__ ): raise ValueError( """Invalid image type. 
Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # All transformations expect numpy arrays. __magic_name__ = [to_numpy_array(UpperCamelCase__ ) for image in images] if do_resize: __magic_name__ = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images] if do_center_crop: __magic_name__ = [self.center_crop(image=UpperCamelCase__ , size=UpperCamelCase__ ) for image in images] if do_rescale: __magic_name__ = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images] if do_normalize: __magic_name__ = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images] __magic_name__ = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images] __magic_name__ = {"""pixel_values""": images} return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
76
1
import random from .binary_exp_mod import bin_exp_mod def a__ ( A_, A_=1000 ): '''simple docstring''' if n < 2: return False if n % 2 == 0: return n == 2 # this means n is odd __magic_name__ = n - 1 __magic_name__ = 0 while d % 2 == 0: d /= 2 exp += 1 # n - 1=d*(2**exp) __magic_name__ = 0 while count < prec: __magic_name__ = random.randint(2, n - 1 ) __magic_name__ = bin_exp_mod(A_, A_, A_ ) if b != 1: __magic_name__ = True for _ in range(A_ ): if b == n - 1: __magic_name__ = False break __magic_name__ = b * b b %= n if flag: return False count += 1 return True if __name__ == "__main__": __lowerCAmelCase : Union[str, Any] = abs(int(input('Enter bound : ').strip())) print('Here\'s the list of primes:') print(', '.join(str(i) for i in range(n + 1) if is_prime_big(i)))
76
import math def a__ ( A_, A_ = 0, A_ = 0 ): '''simple docstring''' __magic_name__ = end or len(A_ ) for i in range(A_, A_ ): __magic_name__ = i __magic_name__ = array[i] while temp_index != start and temp_index_value < array[temp_index - 1]: __magic_name__ = array[temp_index - 1] temp_index -= 1 __magic_name__ = temp_index_value return array def a__ ( A_, A_, A_ ): # Max Heap '''simple docstring''' __magic_name__ = index __magic_name__ = 2 * index + 1 # Left Node __magic_name__ = 2 * index + 2 # Right Node if left_index < heap_size and array[largest] < array[left_index]: __magic_name__ = left_index if right_index < heap_size and array[largest] < array[right_index]: __magic_name__ = right_index if largest != index: __magic_name__ , __magic_name__ = array[largest], array[index] heapify(A_, A_, A_ ) def a__ ( A_ ): '''simple docstring''' __magic_name__ = len(A_ ) for i in range(n // 2, -1, -1 ): heapify(A_, A_, A_ ) for i in range(n - 1, 0, -1 ): __magic_name__ , __magic_name__ = array[0], array[i] heapify(A_, 0, A_ ) return array def a__ ( A_, A_, A_, A_ ): '''simple docstring''' if (array[first_index] > array[middle_index]) != ( array[first_index] > array[last_index] ): return array[first_index] elif (array[middle_index] > array[first_index]) != ( array[middle_index] > array[last_index] ): return array[middle_index] else: return array[last_index] def a__ ( A_, A_, A_, A_ ): '''simple docstring''' __magic_name__ = low __magic_name__ = high while True: while array[i] < pivot: i += 1 j -= 1 while pivot < array[j]: j -= 1 if i >= j: return i __magic_name__ , __magic_name__ = array[j], array[i] i += 1 def a__ ( A_ ): '''simple docstring''' if len(A_ ) == 0: return array __magic_name__ = 2 * math.ceil(math.loga(len(A_ ) ) ) __magic_name__ = 16 return intro_sort(A_, 0, len(A_ ), A_, A_ ) def a__ ( A_, A_, A_, A_, A_ ): '''simple docstring''' while end - start > size_threshold: if max_depth == 0: return heap_sort(A_ ) max_depth -= 1 __magic_name__ = median_of_a(A_, A_, 
start + ((end - start) // 2) + 1, end - 1 ) __magic_name__ = partition(A_, A_, A_, A_ ) intro_sort(A_, A_, A_, A_, A_ ) __magic_name__ = p return insertion_sort(A_, A_, A_ ) if __name__ == "__main__": import doctest doctest.testmod() __lowerCAmelCase : str = input('Enter numbers separated by a comma : ').strip() __lowerCAmelCase : List[Any] = [float(item) for item in user_input.split(',')] print(sort(unsorted))
76
1