Dataset schema (one example spans five fields):

    column                   dtype    range
    code                     string   length 86 to 54.5k
    code_codestyle           int64    0 to 371
    style_context            string   length 87 to 49.2k
    style_context_codestyle  int64    0 to 349
    label                    int64    0 to 1
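The columns above read like a dump of a code/style-pair dataset: two source snippets per example, an integer style id for each, and a binary label (in the rows below, `label` is 1 exactly when the two style ids match). As a minimal sketch, assuming the table is published as a Hugging Face dataset — the path `user/code-style-pairs` is a hypothetical placeholder, not a real identifier — it could be loaded and inspected like this:

    # Minimal sketch, not the canonical loader for this dump.
    # Assumption: the table is available as a Hugging Face dataset;
    # the dataset path below is a hypothetical placeholder.
    from datasets import load_dataset

    ds = load_dataset("user/code-style-pairs", split="train")  # hypothetical path

    for row in ds.select(range(3)):
        # In the rows shown in this dump, `label` is 1 exactly when
        # `code_codestyle` equals `style_context_codestyle`.
        print(row["code_codestyle"], row["style_context_codestyle"], row["label"])

---- Example 1 ----

code: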
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from ..models.auto import AutoProcessor from ..models.vision_encoder_decoder import VisionEncoderDecoderModel from ..utils import is_vision_available from .base import PipelineTool if is_vision_available(): from PIL import Image class _A ( _a ): """simple docstring""" UpperCAmelCase : str = """naver-clova-ix/donut-base-finetuned-docvqa""" UpperCAmelCase : Tuple = ( """This is a tool that answers a question about an document (pdf). It takes an input named `document` which """ """should be the document containing the information, as well as a `question` that is the question about the """ """document. It returns a text that contains the answer to the question.""" ) UpperCAmelCase : List[str] = """document_qa""" UpperCAmelCase : str = AutoProcessor UpperCAmelCase : Optional[int] = VisionEncoderDecoderModel UpperCAmelCase : int = ["""image""", """text"""] UpperCAmelCase : int = ["""text"""] def __init__( self : Tuple , *__UpperCAmelCase : Union[str, Any] , **__UpperCAmelCase : Any): if not is_vision_available(): raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.") super().__init__(*__UpperCAmelCase , **__UpperCAmelCase) def __snake_case ( self : Tuple , __UpperCAmelCase : "Image" , __UpperCAmelCase : str): a : Any = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" a : Union[str, Any] = task_prompt.replace("{user_input}" , __UpperCAmelCase) a : Optional[Any] = self.pre_processor.tokenizer( __UpperCAmelCase , add_special_tokens=__UpperCAmelCase , return_tensors="pt").input_ids a : Any = self.pre_processor(__UpperCAmelCase , return_tensors="pt").pixel_values return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values} def __snake_case ( self : int , __UpperCAmelCase : int): return self.model.generate( inputs["pixel_values"].to(self.device) , decoder_input_ids=inputs["decoder_input_ids"].to(self.device) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=__UpperCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=__UpperCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=__UpperCAmelCase , ).sequences def __snake_case ( self : str , __UpperCAmelCase : List[Any]): a : Union[str, Any] = self.pre_processor.batch_decode(__UpperCAmelCase)[0] a : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , "") a : Any = sequence.replace(self.pre_processor.tokenizer.pad_token , "") a : Optional[Any] = re.sub(r"<.*?>" , "" , __UpperCAmelCase , count=1).strip() # remove first task start token a : List[str] = self.pre_processor.tokenajson(__UpperCAmelCase) return sequence["answer"]
code_codestyle: 40

style_context:
"""simple docstring""" from __future__ import annotations class snake_case : def __init__( self , __UpperCAmelCase) ->Any: a_ = TypeError( "Matrices must be formed from a list of zero or more lists containing at " "least one and the same number of values, each of which must be of type " "int or float.") if len(__UpperCAmelCase) != 0: a_ = len(rows[0]) if cols == 0: raise error for row in rows: if len(__UpperCAmelCase) != cols: raise error for value in row: if not isinstance(__UpperCAmelCase , (int, float)): raise error a_ = rows else: a_ = [] def UpperCAmelCase__ ( self) ->list[list[int]]: return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))] @property def UpperCAmelCase__ ( self) ->int: return len(self.rows) @property def UpperCAmelCase__ ( self) ->int: return len(self.rows[0]) @property def UpperCAmelCase__ ( self) ->tuple[int, int]: return (self.num_rows, self.num_columns) @property def UpperCAmelCase__ ( self) ->bool: return self.order[0] == self.order[1] def UpperCAmelCase__ ( self) ->Matrix: a_ = [ [0 if column_num != row_num else 1 for column_num in range(self.num_rows)] for row_num in range(self.num_rows) ] return Matrix(__UpperCAmelCase) def UpperCAmelCase__ ( self) ->int: if not self.is_square: return 0 if self.order == (0, 0): return 1 if self.order == (1, 1): return int(self.rows[0][0]) if self.order == (2, 2): return int( (self.rows[0][0] * self.rows[1][1]) - (self.rows[0][1] * self.rows[1][0])) else: return sum( self.rows[0][column] * self.cofactors().rows[0][column] for column in range(self.num_columns)) def UpperCAmelCase__ ( self) ->bool: return bool(self.determinant()) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->int: a_ = [ [ self.rows[other_row][other_column] for other_column in range(self.num_columns) if other_column != column ] for other_row in range(self.num_rows) if other_row != row ] return Matrix(__UpperCAmelCase).determinant() def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase) ->int: if (row + column) % 2 == 0: return self.get_minor(__UpperCAmelCase , __UpperCAmelCase) return -1 * self.get_minor(__UpperCAmelCase , __UpperCAmelCase) def UpperCAmelCase__ ( self) ->Matrix: return Matrix( [ [self.get_minor(__UpperCAmelCase , __UpperCAmelCase) for column in range(self.num_columns)] for row in range(self.num_rows) ]) def UpperCAmelCase__ ( self) ->Matrix: return Matrix( [ [ self.minors().rows[row][column] if (row + column) % 2 == 0 else self.minors().rows[row][column] * -1 for column in range(self.minors().num_columns) ] for row in range(self.minors().num_rows) ]) def UpperCAmelCase__ ( self) ->Matrix: a_ = [ [self.cofactors().rows[column][row] for column in range(self.num_columns)] for row in range(self.num_rows) ] return Matrix(__UpperCAmelCase) def UpperCAmelCase__ ( self) ->Matrix: a_ = self.determinant() if not determinant: raise TypeError("Only matrices with a non-zero determinant have an inverse") return self.adjugate() * (1 / determinant) def __repr__( self) ->str: return str(self.rows) def __str__( self) ->str: if self.num_rows == 0: return "[]" if self.num_rows == 1: return "[[" + ". ".join(str(self.rows[0])) + "]]" return ( "[" + "\n ".join( [ "[" + ". 
".join([str(__UpperCAmelCase) for value in row]) + ".]" for row in self.rows ]) + "]" ) def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->None: a_ = TypeError("Row must be a list containing all ints and/or floats") if not isinstance(__UpperCAmelCase , __UpperCAmelCase): raise type_error for value in row: if not isinstance(__UpperCAmelCase , (int, float)): raise type_error if len(__UpperCAmelCase) != self.num_columns: raise ValueError( "Row must be equal in length to the other rows in the matrix") if position is None: self.rows.append(__UpperCAmelCase) else: a_ = self.rows[0:position] + [row] + self.rows[position:] def UpperCAmelCase__ ( self , __UpperCAmelCase , __UpperCAmelCase = None) ->None: a_ = TypeError( "Column must be a list containing all ints and/or floats") if not isinstance(__UpperCAmelCase , __UpperCAmelCase): raise type_error for value in column: if not isinstance(__UpperCAmelCase , (int, float)): raise type_error if len(__UpperCAmelCase) != self.num_rows: raise ValueError( "Column must be equal in length to the other columns in the matrix") if position is None: a_ = [self.rows[i] + [column[i]] for i in range(self.num_rows)] else: a_ = [ self.rows[i][0:position] + [column[i]] + self.rows[i][position:] for i in range(self.num_rows) ] def __eq__( self , __UpperCAmelCase) ->bool: if not isinstance(__UpperCAmelCase , __UpperCAmelCase): return NotImplemented return self.rows == other.rows def __ne__( self , __UpperCAmelCase) ->bool: return not self == other def __neg__( self) ->Matrix: return self * -1 def __add__( self , __UpperCAmelCase) ->Matrix: if self.order != other.order: raise ValueError("Addition requires matrices of the same order") return Matrix( [ [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)] for i in range(self.num_rows) ]) def __sub__( self , __UpperCAmelCase) ->Matrix: if self.order != other.order: raise ValueError("Subtraction requires matrices of the same order") return Matrix( [ [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)] for i in range(self.num_rows) ]) def __mul__( self , __UpperCAmelCase) ->Matrix: if isinstance(__UpperCAmelCase , (int, float)): return Matrix( [[int(element * other) for element in row] for row in self.rows]) elif isinstance(__UpperCAmelCase , __UpperCAmelCase): if self.num_columns != other.num_rows: raise ValueError( "The number of columns in the first matrix must " "be equal to the number of rows in the second") return Matrix( [ [Matrix.dot_product(__UpperCAmelCase , __UpperCAmelCase) for column in other.columns()] for row in self.rows ]) else: raise TypeError( "A Matrix can only be multiplied by an int, float, or another matrix") def __pow__( self , __UpperCAmelCase) ->Matrix: if not isinstance(__UpperCAmelCase , __UpperCAmelCase): raise TypeError("A Matrix can only be raised to the power of an int") if not self.is_square: raise ValueError("Only square matrices can be raised to a power") if other == 0: return self.identity() if other < 0: if self.is_invertable(): return self.inverse() ** (-other) raise ValueError( "Only invertable matrices can be raised to a negative power") a_ = self for _ in range(other - 1): result *= self return result @classmethod def UpperCAmelCase__ ( cls , __UpperCAmelCase , __UpperCAmelCase) ->int: return sum(row[i] * column[i] for i in range(len(__UpperCAmelCase))) if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 243
label: 0

---- Example 2 ----

code:
from typing import Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline


if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VisualQuestionAnsweringPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING)

    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, image, question=None, **kwargs):
        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        else:
            # Accept dicts, lists of dicts, generators, and datasets as-is.
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

    def preprocess(self, inputs, padding=False, truncation=False):
        image = load_image(inputs["image"])
        model_inputs = self.tokenizer(
            inputs["question"], return_tensors=self.framework, padding=padding, truncation=truncation
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
code_codestyle: 366

style_context:
from diffusers.utils.testing_utils import require_onnxruntime


@require_onnxruntime
class OnnxRuntimeStub:  # placeholder name; the original class name was obfuscated in the dump
    pass
style_context_codestyle: 18
label: 0

---- Example 3 ----

code:
import math


def is_prime(number: int) -> bool:
    assert isinstance(number, int) and (
        number >= 0
    ), "'number' must be an int and positive"

    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or not number % 2:
        # Negatives, 0, 1 and all even numbers are not primes
        return False

    odd_numbers = range(3, int(math.sqrt(number) + 1), 2)
    return not any(not number % i for i in odd_numbers)


def next_prime(value, factor=1, **kwargs):
    value = factor * value
    first_value_val = value

    while not is_prime(value):
        value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1

    if value == first_value_val:
        return next_prime(value + 1, **kwargs)
    return value
code_codestyle: 193

style_context:
import numpy as np


def tangent_hyperbolic(vector: np.array) -> np.array:
    # tanh(x) = (2 / (1 + e^(-2x))) - 1
    return (2 / (1 + np.exp(-2 * vector))) - 1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 193
label: 1

---- Example 4 ----

code:
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--mobilebert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained MobileBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
code_codestyle: 3

style_context:
def multiplicative_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("multiplicative_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("multiplicative_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 1
        for i in range(0, len(numbers)):
            total *= numbers[i]

        num_string = str(total)
        steps += 1

    return steps


def additive_persistence(num: int) -> int:
    if not isinstance(num, int):
        raise ValueError("additive_persistence() only accepts integral values")
    if num < 0:
        raise ValueError("additive_persistence() does not accept negative values")

    steps = 0
    num_string = str(num)

    while len(num_string) != 1:
        numbers = [int(i) for i in num_string]

        total = 0
        for i in range(0, len(numbers)):
            total += numbers[i]

        num_string = str(total)
        steps += 1

    return steps


if __name__ == "__main__":
    import doctest

    doctest.testmod()
style_context_codestyle: 3
label: 1

---- Example 5 ----

code:
"""simple docstring""" from __future__ import annotations import unittest from transformers import EsmConfig, is_tf_available from transformers.testing_utils import require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import numpy import tensorflow as tf from transformers.models.esm.modeling_tf_esm import ( TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, TFEsmModel, ) class A_ : '''simple docstring''' def __init__( self , lowercase_ , ): """simple docstring""" UpperCAmelCase_ : Optional[int] = parent UpperCAmelCase_ : Any = 13 UpperCAmelCase_ : Optional[int] = 7 UpperCAmelCase_ : Tuple = True UpperCAmelCase_ : int = True UpperCAmelCase_ : Any = True UpperCAmelCase_ : Optional[int] = 99 UpperCAmelCase_ : Tuple = 32 UpperCAmelCase_ : Union[str, Any] = 2 UpperCAmelCase_ : Optional[Any] = 4 UpperCAmelCase_ : List[str] = 37 UpperCAmelCase_ : Optional[int] = "gelu" UpperCAmelCase_ : Union[str, Any] = 0.1 UpperCAmelCase_ : Optional[int] = 0.1 UpperCAmelCase_ : Dict = 512 UpperCAmelCase_ : str = 16 UpperCAmelCase_ : Any = 2 UpperCAmelCase_ : int = 0.02 UpperCAmelCase_ : str = 3 UpperCAmelCase_ : Optional[Any] = 4 UpperCAmelCase_ : List[str] = None def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ : str = None if self.use_input_mask: UpperCAmelCase_ : Dict = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ : List[str] = None UpperCAmelCase_ : Tuple = None UpperCAmelCase_ : Optional[Any] = None if self.use_labels: UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ : Dict = EsmConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , pad_token_id=1 , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , ) return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__ ( self ): """simple docstring""" ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : Union[str, Any] = self.prepare_config_and_inputs() UpperCAmelCase_ : List[str] = True UpperCAmelCase_ : Optional[Any] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = TFEsmModel(config=lowercase_ ) UpperCAmelCase_ : List[Any] = 
{"input_ids": input_ids, "attention_mask": input_mask} UpperCAmelCase_ : Union[str, Any] = model(lowercase_ ) UpperCAmelCase_ : Optional[Any] = [input_ids, input_mask] UpperCAmelCase_ : Tuple = model(lowercase_ ) UpperCAmelCase_ : List[str] = model(lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ): """simple docstring""" UpperCAmelCase_ : Dict = True UpperCAmelCase_ : str = TFEsmModel(config=lowercase_ ) UpperCAmelCase_ : str = { "input_ids": input_ids, "attention_mask": input_mask, "encoder_hidden_states": encoder_hidden_states, "encoder_attention_mask": encoder_attention_mask, } UpperCAmelCase_ : Any = model(lowercase_ ) UpperCAmelCase_ : Optional[Any] = [input_ids, input_mask] UpperCAmelCase_ : str = model(lowercase_ , encoder_hidden_states=lowercase_ ) # Also check the case where encoder outputs are not passed UpperCAmelCase_ : Optional[int] = model(lowercase_ , attention_mask=lowercase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : Dict = TFEsmForMaskedLM(config=lowercase_ ) UpperCAmelCase_ : List[Any] = model([input_ids, input_mask] ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ ): """simple docstring""" UpperCAmelCase_ : List[Any] = self.num_labels UpperCAmelCase_ : Union[str, Any] = TFEsmForTokenClassification(config=lowercase_ ) UpperCAmelCase_ : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask} UpperCAmelCase_ : List[str] = model(lowercase_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) : List[str] = config_and_inputs UpperCAmelCase_ : str = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class A_ (lowercase__ ,lowercase__ ,unittest.TestCase ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = ( ( TFEsmModel, TFEsmForMaskedLM, TFEsmForSequenceClassification, TFEsmForTokenClassification, ) if is_tf_available() else () ) SCREAMING_SNAKE_CASE__ : Optional[Any] = ( { """feature-extraction""": TFEsmModel, """fill-mask""": TFEsmForMaskedLM, """text-classification""": TFEsmForSequenceClassification, """token-classification""": TFEsmForTokenClassification, """zero-shot""": TFEsmForSequenceClassification, } if is_tf_available() else {} ) SCREAMING_SNAKE_CASE__ : int = False SCREAMING_SNAKE_CASE__ : List[str] = False def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = TFEsmModelTester(self ) UpperCAmelCase_ : Optional[Any] = ConfigTester(self , config_class=lowercase_ , hidden_size=37 ) def UpperCamelCase__ ( self ): """simple docstring""" self.config_tester.run_common_tests() def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : List[str] = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_model(*lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowercase_ ) def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase_ ) @slow def UpperCamelCase__ ( self ): """simple docstring""" for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase_ : Optional[Any] = TFEsmModel.from_pretrained(lowercase_ ) self.assertIsNotNone(lowercase_ ) @unittest.skip("Protein models do not support embedding resizing." ) def UpperCamelCase__ ( self ): """simple docstring""" pass @unittest.skip("Protein models do not support embedding resizing." ) def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ , UpperCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase_ : Union[str, Any] = model_class(lowercase_ ) assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer ) if model_class is TFEsmForMaskedLM: # Output embedding test differs from the main test because they're a matrix, not a layer UpperCAmelCase_ : List[str] = model.get_bias() assert isinstance(lowercase_ , lowercase_ ) for k, v in name.items(): assert isinstance(lowercase_ , tf.Variable ) else: UpperCAmelCase_ : Union[str, Any] = model.get_output_embeddings() assert x is None UpperCAmelCase_ : Optional[int] = model.get_bias() assert name is None @require_tf class A_ (unittest.TestCase ): '''simple docstring''' @slow def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = TFEsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D" ) UpperCAmelCase_ : int = tf.constant([[0, 1, 2, 3, 4, 5]] ) UpperCAmelCase_ : Dict = model(lowercase_ )[0] UpperCAmelCase_ : List[Any] = [1, 6, 33] self.assertEqual(list(output.numpy().shape ) , lowercase_ ) # compare the actual values for a slice. UpperCAmelCase_ : Tuple = tf.constant( [ [ [8.92_15_18, -10.58_98_14, -6.4_67_13_07], [-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15], [-7.78_12_47, -13.95_15_57, -3.74_05_92], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) ) @slow def UpperCamelCase__ ( self ): """simple docstring""" UpperCAmelCase_ : Union[str, Any] = TFEsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D" ) UpperCAmelCase_ : Optional[int] = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) UpperCAmelCase_ : Union[str, Any] = model(lowercase_ )[0] # compare the actual values for a slice. UpperCAmelCase_ : Tuple = tf.constant( [ [ [0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39], [0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22], [0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28], ] ] ) self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
code_codestyle: 61

style_context:
"""simple docstring""" import argparse import os import re # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_dummies.py _a = 'src/diffusers' # Matches is_xxx_available() _a = re.compile(R'is\_([a-z_]*)_available\(\)') # Matches from xxx import bla _a = re.compile(R'\s+from\s+\S*\s+import\s+([^\(\s].*)\n') _a = '\n{0} = None\n' _a = '\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n\n @classmethod\n def from_config(cls, *args, **kwargs):\n requires_backends(cls, {1})\n\n @classmethod\n def from_pretrained(cls, *args, **kwargs):\n requires_backends(cls, {1})\n' _a = '\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n' def __a ( __lowerCamelCase ): UpperCAmelCase_ : int = _re_backend.findall(__lowerCamelCase ) if len(__lowerCamelCase ) == 0: return None return "_and_".join(__lowerCamelCase ) def __a ( ): with open(os.path.join(__lowerCamelCase, "__init__.py" ), "r", encoding="utf-8", newline="\n" ) as f: UpperCAmelCase_ : Optional[int] = f.readlines() # Get to the point we do the actual imports for type checking UpperCAmelCase_ : Union[str, Any] = 0 UpperCAmelCase_ : Optional[int] = {} # Go through the end of the file while line_index < len(__lowerCamelCase ): # If the line contains is_backend_available, we grab all objects associated with the `else` block UpperCAmelCase_ : Union[str, Any] = find_backend(lines[line_index] ) if backend is not None: while not lines[line_index].startswith("else:" ): line_index += 1 line_index += 1 UpperCAmelCase_ : List[str] = [] # Until we unindent, add backend objects to the list while line_index < len(__lowerCamelCase ) and len(lines[line_index] ) > 1: UpperCAmelCase_ : Union[str, Any] = lines[line_index] UpperCAmelCase_ : Optional[Any] = _re_single_line_import.search(__lowerCamelCase ) if single_line_import_search is not None: objects.extend(single_line_import_search.groups()[0].split(", " ) ) elif line.startswith(" " * 8 ): objects.append(line[8:-2] ) line_index += 1 if len(__lowerCamelCase ) > 0: UpperCAmelCase_ : Optional[int] = objects else: line_index += 1 return backend_specific_objects def __a ( __lowerCamelCase, __lowerCamelCase ): if name.isupper(): return DUMMY_CONSTANT.format(__lowerCamelCase ) elif name.islower(): return DUMMY_FUNCTION.format(__lowerCamelCase, __lowerCamelCase ) else: return DUMMY_CLASS.format(__lowerCamelCase, __lowerCamelCase ) def __a ( __lowerCamelCase=None ): if backend_specific_objects is None: UpperCAmelCase_ : Tuple = read_init() # For special correspondence backend to module name as used in the function requires_modulename UpperCAmelCase_ : str = {} for backend, objects in backend_specific_objects.items(): UpperCAmelCase_ : int = "[" + ", ".join(f"""\"{b}\"""" for b in backend.split("_and_" ) ) + "]" UpperCAmelCase_ : Dict = "# This file is autogenerated by the command `make fix-copies`, do not edit.\n" dummy_file += "from ..utils import DummyObject, requires_backends\n\n" dummy_file += "\n".join([create_dummy_object(__lowerCamelCase, __lowerCamelCase ) for o in objects] ) UpperCAmelCase_ : int = dummy_file return dummy_files def __a ( __lowerCamelCase=False ): UpperCAmelCase_ : Optional[Any] = create_dummy_files() # For special correspondence backend to shortcut as used in utils/dummy_xxx_objects.py UpperCAmelCase_ : Union[str, Any] = {"torch": "pt"} # Locate actual dummy modules and read their content. 
UpperCAmelCase_ : List[str] = os.path.join(__lowerCamelCase, "utils" ) UpperCAmelCase_ : Optional[int] = { backend: os.path.join(__lowerCamelCase, f"""dummy_{short_names.get(__lowerCamelCase, __lowerCamelCase )}_objects.py""" ) for backend in dummy_files.keys() } UpperCAmelCase_ : Any = {} for backend, file_path in dummy_file_paths.items(): if os.path.isfile(__lowerCamelCase ): with open(__lowerCamelCase, "r", encoding="utf-8", newline="\n" ) as f: UpperCAmelCase_ : Optional[int] = f.read() else: UpperCAmelCase_ : Any = "" for backend in dummy_files.keys(): if dummy_files[backend] != actual_dummies[backend]: if overwrite: print( f"""Updating diffusers.utils.dummy_{short_names.get(__lowerCamelCase, __lowerCamelCase )}_objects.py as the main """ "__init__ has new objects." ) with open(dummy_file_paths[backend], "w", encoding="utf-8", newline="\n" ) as f: f.write(dummy_files[backend] ) else: raise ValueError( "The main __init__ has objects that are not present in " f"""diffusers.utils.dummy_{short_names.get(__lowerCamelCase, __lowerCamelCase )}_objects.py. Run `make fix-copies` """ "to fix this." ) if __name__ == "__main__": _a = argparse.ArgumentParser() parser.add_argument('--fix_and_overwrite', action='store_true', help='Whether to fix inconsistencies.') _a = parser.parse_args() check_dummies(args.fix_and_overwrite)
style_context_codestyle: 61
label: 1

---- Example 6 ----

code:
import gc import unittest import numpy as np import torch from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS, UNCONDITIONAL_AUDIO_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _a ( _lowerCAmelCase , unittest.TestCase ): A = DanceDiffusionPipeline A = UNCONDITIONAL_AUDIO_GENERATION_PARAMS A = PipelineTesterMixin.required_optional_params - { '''callback''', '''latents''', '''callback_steps''', '''output_type''', '''num_images_per_prompt''', } A = UNCONDITIONAL_AUDIO_GENERATION_BATCH_PARAMS A = False A = False def __snake_case (self ) -> Optional[Any]: torch.manual_seed(0 ) UpperCAmelCase_: Dict = UNetaDModel( block_out_channels=(32, 32, 64), extra_in_channels=16, sample_size=512, sample_rate=16000, in_channels=2, out_channels=2, flip_sin_to_cos=SCREAMING_SNAKE_CASE_, use_timestep_embedding=SCREAMING_SNAKE_CASE_, time_embedding_type="""fourier""", mid_block_type="""UNetMidBlock1D""", down_block_types=("""DownBlock1DNoSkip""", """DownBlock1D""", """AttnDownBlock1D"""), up_block_types=("""AttnUpBlock1D""", """UpBlock1D""", """UpBlock1DNoSkip"""), ) UpperCAmelCase_: Any = IPNDMScheduler() UpperCAmelCase_: List[str] = { """unet""": unet, """scheduler""": scheduler, } return components def __snake_case (self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=0 ) -> List[str]: if str(SCREAMING_SNAKE_CASE_ ).startswith("""mps""" ): UpperCAmelCase_: List[str] = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) else: UpperCAmelCase_: Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Optional[Any] = { """batch_size""": 1, """generator""": generator, """num_inference_steps""": 4, } return inputs def __snake_case (self ) -> Dict: UpperCAmelCase_: List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase_: Optional[Any] = self.get_dummy_components() UpperCAmelCase_: List[str] = DanceDiffusionPipeline(**SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: List[Any] = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: int = self.get_dummy_inputs(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Optional[Any] = pipe(**SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: int = output.audios UpperCAmelCase_: List[str] = audio[0, -3:, -3:] assert audio.shape == (1, 2, components["unet"].sample_size) UpperCAmelCase_: List[Any] = np.array([-0.7_2_6_5, 1.0_0_0_0, -0.8_3_8_8, 0.1_1_7_5, 0.9_4_9_8, -1.0_0_0_0] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2 @skip_mps def __snake_case (self ) -> Any: return super().test_save_load_local() @skip_mps def __snake_case (self ) -> Any: return super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 ) @skip_mps def __snake_case (self ) -> Dict: return super().test_save_load_optional_components() @skip_mps def __snake_case (self ) -> Dict: return super().test_attention_slicing_forward_pass() def __snake_case (self ) -> Optional[int]: super().test_inference_batch_single_identical(expected_max_diff=3E-3 ) @slow @require_torch_gpu class _a ( unittest.TestCase ): def __snake_case (self ) -> Any: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __snake_case (self ) -> List[Any]: 
UpperCAmelCase_: Any = torch_device UpperCAmelCase_: Tuple = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""" ) UpperCAmelCase_: int = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Dict = torch.manual_seed(0 ) UpperCAmelCase_: Union[str, Any] = pipe(generator=SCREAMING_SNAKE_CASE_, num_inference_steps=100, audio_length_in_s=4.0_9_6 ) UpperCAmelCase_: Optional[Any] = output.audios UpperCAmelCase_: List[Any] = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) UpperCAmelCase_: Optional[Any] = np.array([-0.0_1_9_2, -0.0_2_3_1, -0.0_3_1_8, -0.0_0_5_9, 0.0_0_0_2, -0.0_0_2_0] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2 def __snake_case (self ) -> Union[str, Any]: UpperCAmelCase_: Dict = torch_device UpperCAmelCase_: Optional[Any] = DanceDiffusionPipeline.from_pretrained("""harmonai/maestro-150k""", torch_dtype=torch.floataa ) UpperCAmelCase_: Dict = pipe.to(SCREAMING_SNAKE_CASE_ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE_ ) UpperCAmelCase_: Union[str, Any] = torch.manual_seed(0 ) UpperCAmelCase_: int = pipe(generator=SCREAMING_SNAKE_CASE_, num_inference_steps=100, audio_length_in_s=4.0_9_6 ) UpperCAmelCase_: int = output.audios UpperCAmelCase_: Union[str, Any] = audio[0, -3:, -3:] assert audio.shape == (1, 2, pipe.unet.sample_size) UpperCAmelCase_: Dict = np.array([-0.0_3_6_7, -0.0_4_8_8, -0.0_7_7_1, -0.0_5_2_5, -0.0_4_4_4, -0.0_3_4_1] ) assert np.abs(audio_slice.flatten() - expected_slice ).max() < 1E-2
code_codestyle: 82

style_context:
import math
from typing import Optional

import numpy as np

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}


class EncodecConfig(PretrainedConfig):
    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'`norm_type` must be one of `"weight_norm"` or `"time_group_norm"`, got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self) -> Optional[int]:
        if self.chunk_length_s is None:
            return None
        return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self) -> Optional[int]:
        if self.chunk_length_s is None or self.overlap is None:
            return None
        return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
style_context_codestyle: 82
label: 1

---- Example 7 ----

code:
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}


class XmodConfig(PretrainedConfig):
    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language


class XmodOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
code_codestyle: 176

style_context:
def solution() -> int:
    # Project Euler style: product of the digits d_1, d_10, d_100, ...,
    # d_1000000 of Champernowne's constant.
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )


if __name__ == "__main__":
    print(solution())
style_context_codestyle: 176
label: 1

---- Example 8 ----

code:
"""simple docstring""" import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import Callable, Dict, List, Tuple import timm import torch import torch.nn as nn from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf from huggingface_hub import cached_download, hf_hub_url from torch import Tensor from vissl.models.model_helpers import get_trunk_forward_outputs from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel from transformers.utils import logging logging.set_verbosity_info() __SCREAMING_SNAKE_CASE : int = logging.get_logger() @dataclass class __A : '''simple docstring''' __lowercase: nn.Module __lowercase: List[nn.Module] = field(default_factory=snake_case__) __lowercase: list = field(default_factory=snake_case__) def lowerCAmelCase ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : Tensor , UpperCAmelCase_ : Tensor ) ->List[Any]: """simple docstring""" snake_case_ = len(list(m.modules() ) ) == 1 or isinstance(UpperCAmelCase_ , nn.Convad ) or isinstance(UpperCAmelCase_ , nn.BatchNormad ) if has_not_submodules: self.traced.append(UpperCAmelCase_ ) def __call__( self : int , UpperCAmelCase_ : Tensor ) ->Optional[int]: """simple docstring""" for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(UpperCAmelCase_ ) [x.remove() for x in self.handles] return self @property def lowerCAmelCase ( self : Tuple ) ->List[str]: """simple docstring""" return list(filter(lambda UpperCAmelCase_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class __A : '''simple docstring''' __lowercase: nn.Module __lowercase: nn.Module __lowercase: int = 1 __lowercase: List = field(default_factory=snake_case__) __lowercase: List = field(default_factory=snake_case__) __lowercase: bool = True def __call__( self : Optional[int] , UpperCAmelCase_ : Tensor ) ->List[Any]: """simple docstring""" snake_case_ = Tracker(self.dest )(UpperCAmelCase_ ).parametrized snake_case_ = Tracker(self.src )(UpperCAmelCase_ ).parametrized snake_case_ = list(filter(lambda UpperCAmelCase_ : type(UpperCAmelCase_ ) not in self.src_skip , UpperCAmelCase_ ) ) snake_case_ = list(filter(lambda UpperCAmelCase_ : type(UpperCAmelCase_ ) not in self.dest_skip , UpperCAmelCase_ ) ) if len(UpperCAmelCase_ ) != len(UpperCAmelCase_ ) and self.raise_if_mismatch: raise Exception( F"""Numbers of operations are different. 
Source module has {len(UpperCAmelCase_ )} operations while""" F""" destination module has {len(UpperCAmelCase_ )}.""" ) for dest_m, src_m in zip(UpperCAmelCase_ , UpperCAmelCase_ ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F"""Transfered from={src_m} to={dest_m}""" ) class __A (nn.Module): '''simple docstring''' def __init__( self : Optional[int] , UpperCAmelCase_ : nn.Module ) ->Union[str, Any]: """simple docstring""" super().__init__() snake_case_ = [] # - get the stem feature_blocks.append(("""conv1""", model.stem) ) # - get all the feature blocks for k, v in model.trunk_output.named_children(): assert k.startswith("""block""" ), F"""Unexpected layer name {k}""" snake_case_ = len(UpperCAmelCase_ ) + 1 feature_blocks.append((F"""res{block_index}""", v) ) snake_case_ = nn.ModuleDict(UpperCAmelCase_ ) def lowerCAmelCase ( self : List[Any] , UpperCAmelCase_ : Tensor ) ->Union[str, Any]: """simple docstring""" return get_trunk_forward_outputs( UpperCAmelCase_ , out_feat_keys=UpperCAmelCase_ , feature_blocks=self._feature_blocks , ) class __A (snake_case__): '''simple docstring''' def lowerCAmelCase ( self : Any , UpperCAmelCase_ : str ) ->str: """simple docstring""" snake_case_ = x.split("""-""" ) return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] ) def __getitem__( self : Optional[int] , UpperCAmelCase_ : str ) ->Callable[[], Tuple[nn.Module, Dict]]: """simple docstring""" if x not in self: snake_case_ = self.convert_name_to_timm(UpperCAmelCase_ ) snake_case_ = partial(lambda: (timm.create_model(UpperCAmelCase_ , pretrained=UpperCAmelCase_ ).eval(), None) ) else: snake_case_ = super().__getitem__(UpperCAmelCase_ ) return val class __A (snake_case__): '''simple docstring''' def __getitem__( self : List[str] , UpperCAmelCase_ : str ) ->Callable[[], nn.Module]: """simple docstring""" if "seer" in x and "in1k" not in x: snake_case_ = RegNetModel else: snake_case_ = RegNetForImageClassification return val def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple: for from_key, to_key in keys: snake_case_ = from_state_dict[from_key].clone() print(f"""Copied key={from_key} to={to_key}""" ) return to_state_dict def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True , ) -> Any: print(f"""Converting {name}...""" ) with torch.no_grad(): snake_case_ , snake_case_ = from_model_func() snake_case_ = our_model_func(_SCREAMING_SNAKE_CASE ).eval() snake_case_ = ModuleTransfer(src=_SCREAMING_SNAKE_CASE , dest=_SCREAMING_SNAKE_CASE , raise_if_mismatch=_SCREAMING_SNAKE_CASE ) snake_case_ = torch.randn((1, 3, 224, 224) ) module_transfer(_SCREAMING_SNAKE_CASE ) if from_state_dict is not None: snake_case_ = [] # for seer - in1k finetuned we have to manually copy the head if "seer" in name and "in1k" in name: snake_case_ = [("""0.clf.0.weight""", """classifier.1.weight"""), ("""0.clf.0.bias""", """classifier.1.bias""")] snake_case_ = manually_copy_vissl_head(_SCREAMING_SNAKE_CASE , our_model.state_dict() , _SCREAMING_SNAKE_CASE ) our_model.load_state_dict(_SCREAMING_SNAKE_CASE ) snake_case_ = our_model(_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE ) snake_case_ = ( our_outputs.logits if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else our_outputs.last_hidden_state ) snake_case_ = from_model(_SCREAMING_SNAKE_CASE ) snake_case_ = from_output[-1] if type(_SCREAMING_SNAKE_CASE ) is list else from_output # now 
since I don't want to use any config files, vissl seer model doesn't actually have an head, so let's just check the last hidden state if "seer" in name and "in1k" in name: snake_case_ = our_outputs.hidden_states[-1] assert torch.allclose(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ), "The model logits don't match the original one." if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / name , commit_message="""Add model""" , use_temp_dir=_SCREAMING_SNAKE_CASE , ) snake_case_ = 224 if """seer""" not in name else 384 # we can use the convnext one snake_case_ = AutoImageProcessor.from_pretrained("""facebook/convnext-base-224-22k-1k""" , size=_SCREAMING_SNAKE_CASE ) image_processor.push_to_hub( repo_path_or_name=save_directory / name , commit_message="""Add image processor""" , use_temp_dir=_SCREAMING_SNAKE_CASE , ) print(f"""Pushed {name}""" ) def _a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = True ) -> Tuple: snake_case_ = """imagenet-1k-id2label.json""" snake_case_ = 1_000 snake_case_ = (1, num_labels) snake_case_ = """huggingface/label-files""" snake_case_ = num_labels snake_case_ = json.load(open(cached_download(hf_hub_url(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) ) , """r""" ) ) snake_case_ = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = partial(_SCREAMING_SNAKE_CASE , num_labels=_SCREAMING_SNAKE_CASE , idalabel=_SCREAMING_SNAKE_CASE , labelaid=_SCREAMING_SNAKE_CASE ) snake_case_ = { """regnet-x-002""": ImageNetPreTrainedConfig( depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="""x""" ), """regnet-x-004""": ImageNetPreTrainedConfig( depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="""x""" ), """regnet-x-006""": ImageNetPreTrainedConfig( depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="""x""" ), """regnet-x-008""": ImageNetPreTrainedConfig( depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="""x""" ), """regnet-x-016""": ImageNetPreTrainedConfig( depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="""x""" ), """regnet-x-032""": ImageNetPreTrainedConfig( depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1_008] , groups_width=48 , layer_type="""x""" ), """regnet-x-040""": ImageNetPreTrainedConfig( depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1_360] , groups_width=40 , layer_type="""x""" ), """regnet-x-064""": ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1_624] , groups_width=56 , layer_type="""x""" ), """regnet-x-080""": ImageNetPreTrainedConfig( depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1_920] , groups_width=120 , layer_type="""x""" ), """regnet-x-120""": ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 , layer_type="""x""" ), """regnet-x-160""": ImageNetPreTrainedConfig( depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2_048] , groups_width=128 , layer_type="""x""" ), """regnet-x-320""": ImageNetPreTrainedConfig( depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1_344, 2_520] , groups_width=168 , layer_type="""x""" ), # y variant """regnet-y-002""": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ), """regnet-y-004""": ImageNetPreTrainedConfig( depths=[1, 3, 6, 6] , 
hidden_sizes=[48, 104, 208, 440] , groups_width=8 ), """regnet-y-006""": ImageNetPreTrainedConfig( depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ), """regnet-y-008""": ImageNetPreTrainedConfig( depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ), """regnet-y-016""": ImageNetPreTrainedConfig( depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ), """regnet-y-032""": ImageNetPreTrainedConfig( depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1_512] , groups_width=24 ), """regnet-y-040""": ImageNetPreTrainedConfig( depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1_088] , groups_width=64 ), """regnet-y-064""": ImageNetPreTrainedConfig( depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1_296] , groups_width=72 ), """regnet-y-080""": ImageNetPreTrainedConfig( depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2_016] , groups_width=56 ), """regnet-y-120""": ImageNetPreTrainedConfig( depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2_240] , groups_width=112 ), """regnet-y-160""": ImageNetPreTrainedConfig( depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1_232, 3_024] , groups_width=112 ), """regnet-y-320""": ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ), # models created by SEER -> https://arxiv.org/abs/2202.08360 """regnet-y-320-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ), """regnet-y-640-seer""": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ), """regnet-y-1280-seer""": RegNetConfig( depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ), """regnet-y-2560-seer""": RegNetConfig( depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ), """regnet-y-10b-seer""": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ), # finetuned on imagenet """regnet-y-320-seer-in1k""": ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1_392, 3_712] , groups_width=232 ), """regnet-y-640-seer-in1k""": ImageNetPreTrainedConfig( depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1_968, 4_920] , groups_width=328 ), """regnet-y-1280-seer-in1k""": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[528, 1_056, 2_904, 7_392] , groups_width=264 ), """regnet-y-2560-seer-in1k""": ImageNetPreTrainedConfig( depths=[3, 7, 16, 1] , hidden_sizes=[640, 1_696, 2_544, 5_088] , groups_width=640 ), """regnet-y-10b-seer-in1k""": ImageNetPreTrainedConfig( depths=[2, 7, 17, 1] , hidden_sizes=[2_020, 4_040, 11_110, 28_280] , groups_width=1_010 ), } snake_case_ = NameToOurModelFuncMap() snake_case_ = NameToFromModelFuncMap() # add seer weights logic def load_using_classy_vision(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple[nn.Module, Dict]: snake_case_ = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , model_dir=str(_SCREAMING_SNAKE_CASE ) , map_location="""cpu""" ) snake_case_ = model_func() # check if we have a head, if yes add it snake_case_ = files["""classy_state_dict"""]["""base_model"""]["""model"""] snake_case_ = model_state_dict["""trunk"""] model.load_state_dict(_SCREAMING_SNAKE_CASE ) return model.eval(), model_state_dict["heads"] # pretrained snake_case_ = partial( _SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch""" , lambda: 
FakeRegNetVisslWrapper(RegNetYaagf() ) , ) snake_case_ = partial( _SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) snake_case_ = partial( _SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) snake_case_ = partial( _SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch""" , lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27 , group_width=1_010 , w_a=1_744 , w_a=620.83 , w_m=2.52 ) ) ) , ) # IN1K finetuned snake_case_ = partial( _SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) snake_case_ = partial( _SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , ) snake_case_ = partial( _SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch""" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , ) snake_case_ = partial( _SCREAMING_SNAKE_CASE , """https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch""" , lambda: FakeRegNetVisslWrapper( RegNet(RegNetParams(depth=27 , group_width=1_010 , w_a=1_744 , w_a=620.83 , w_m=2.52 ) ) ) , ) if model_name: convert_weight_and_push( _SCREAMING_SNAKE_CASE , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) else: for model_name, config in names_to_config.items(): convert_weight_and_push( _SCREAMING_SNAKE_CASE , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ) return config, expected_shape if __name__ == "__main__": __SCREAMING_SNAKE_CASE : Union[str, Any] = argparse.ArgumentParser() # Required parameters parser.add_argument( '--model_name', default=None, type=str, help=( 'The name of the model you wish to convert, it must be one of the supported regnet* architecture,' ' currently: regnetx-*, regnety-*. If `None`, all of them will the converted.' ), ) parser.add_argument( '--pytorch_dump_folder_path', default=None, type=Path, required=True, help='Path to the output PyTorch model directory.', ) parser.add_argument( '--push_to_hub', default=True, type=bool, required=False, help='If True, push model and image processor to the hub.', ) __SCREAMING_SNAKE_CASE : str = parser.parse_args() __SCREAMING_SNAKE_CASE : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
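# Hedged illustration (not part of the original conversion script): the config
# table above pre-binds the shared ImageNet label maps with `functools.partial`
# so each entry only supplies architecture hyper-parameters. This self-contained
# sketch reproduces the pattern with a two-class stand-in label map; all names
# below are illustrative assumptions.
from functools import partial


def make_config(depths, hidden_sizes, groups_width, num_labels, idalabel, labelaid):
    return {
        "depths": depths,
        "hidden_sizes": hidden_sizes,
        "groups_width": groups_width,
        "num_labels": num_labels,
        "id2label": idalabel,
        "label2id": labelaid,
    }


_idalabel = {0: "tench", 1: "goldfish"}  # stand-in for the 1_000-class ImageNet map
_preconfigured = partial(
    make_config,
    num_labels=len(_idalabel),
    idalabel=_idalabel,
    labelaid={v: k for k, v in _idalabel.items()},
)
_cfg = _preconfigured(depths=[1, 1, 4, 7], hidden_sizes=[24, 56, 152, 368], groups_width=8)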
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging __SCREAMING_SNAKE_CASE : Union[str, Any] = logging.get_logger(__name__) __SCREAMING_SNAKE_CASE : int = { 'google/canine-s': 'https://huggingface.co/google/canine-s/resolve/main/config.json', # See all CANINE models at https://huggingface.co/models?filter=canine } class __A (snake_case__): '''simple docstring''' __lowercase: List[str] = """canine""" def __init__( self : Union[str, Any] , UpperCAmelCase_ : str=768 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : Optional[Any]=12 , UpperCAmelCase_ : Optional[Any]=3_072 , UpperCAmelCase_ : List[Any]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : Any=0.1 , UpperCAmelCase_ : List[str]=16_384 , UpperCAmelCase_ : Tuple=16 , UpperCAmelCase_ : int=0.02 , UpperCAmelCase_ : Tuple=1E-12 , UpperCAmelCase_ : str=0 , UpperCAmelCase_ : int=0XE000 , UpperCAmelCase_ : Optional[int]=0XE001 , UpperCAmelCase_ : Dict=4 , UpperCAmelCase_ : List[Any]=4 , UpperCAmelCase_ : List[Any]=8 , UpperCAmelCase_ : Dict=16_384 , UpperCAmelCase_ : Optional[int]=128 , **UpperCAmelCase_ : Any , ) ->int: """simple docstring""" super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ ) snake_case_ = max_position_embeddings snake_case_ = hidden_size snake_case_ = num_hidden_layers snake_case_ = num_attention_heads snake_case_ = intermediate_size snake_case_ = hidden_act snake_case_ = hidden_dropout_prob snake_case_ = attention_probs_dropout_prob snake_case_ = initializer_range snake_case_ = type_vocab_size snake_case_ = layer_norm_eps # Character config: snake_case_ = downsampling_rate snake_case_ = upsampling_kernel_size snake_case_ = num_hash_functions snake_case_ = num_hash_buckets snake_case_ = local_transformer_stride
"""simple docstring""" import gc import unittest from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline from transformers.pipelines import PipelineException from transformers.testing_utils import ( is_pipeline_test, is_torch_available, nested_simplify, require_tf, require_torch, require_torch_gpu, slow, ) from .test_pipelines_common import ANY @is_pipeline_test class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): """simple docstring""" __lowercase : int = MODEL_FOR_MASKED_LM_MAPPING __lowercase : Optional[int] = TF_MODEL_FOR_MASKED_LM_MAPPING def snake_case_ ( self): super().tearDown() # clean-up as much as possible GPU memory occupied by PyTorch gc.collect() if is_torch_available(): import torch torch.cuda.empty_cache() @require_tf def snake_case_ ( self): __SCREAMING_SNAKE_CASE = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""tf""") __SCREAMING_SNAKE_CASE = unmasker("""My name is <mask>""") self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=6) , [ {"""sequence""": """My name is grouped""", """score""": 2.1E-05, """token""": 3_8_0_1_5, """token_str""": """ grouped"""}, {"""sequence""": """My name is accuser""", """score""": 2.1E-05, """token""": 2_5_5_0_6, """token_str""": """ accuser"""}, ] , ) __SCREAMING_SNAKE_CASE = unmasker("""The largest city in France is <mask>""") self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=6) , [ { """sequence""": """The largest city in France is grouped""", """score""": 2.1E-05, """token""": 3_8_0_1_5, """token_str""": """ grouped""", }, { """sequence""": """The largest city in France is accuser""", """score""": 2.1E-05, """token""": 2_5_5_0_6, """token_str""": """ accuser""", }, ] , ) __SCREAMING_SNAKE_CASE = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=6) , [ {"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 1_3_6_0_6, """token_str""": """ Clara"""}, {"""sequence""": """My name is Patrick""", """score""": 2E-05, """token""": 3_4_9_9, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Te""", """score""": 1.9E-05, """token""": 2_9_4_1, """token_str""": """ Te"""}, ] , ) @require_torch def snake_case_ ( self): __SCREAMING_SNAKE_CASE = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , top_k=2 , framework="""pt""") __SCREAMING_SNAKE_CASE = unmasker("""My name is <mask>""") self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=6) , [ {"""sequence""": """My name is Maul""", """score""": 2.2E-05, """token""": 3_5_6_7_6, """token_str""": """ Maul"""}, {"""sequence""": """My name isELS""", """score""": 2.2E-05, """token""": 1_6_4_1_6, """token_str""": """ELS"""}, ] , ) __SCREAMING_SNAKE_CASE = unmasker("""The largest city in France is <mask>""") self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=6) , [ { """sequence""": """The largest city in France is Maul""", """score""": 2.2E-05, """token""": 3_5_6_7_6, """token_str""": """ Maul""", }, {"""sequence""": """The largest city in France isELS""", """score""": 2.2E-05, """token""": 1_6_4_1_6, """token_str""": """ELS"""}, ] , ) __SCREAMING_SNAKE_CASE = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=6) , [ {"""sequence""": """My name is Patrick""", """score""": 2.1E-05, 
"""token""": 3_4_9_9, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Te""", """score""": 2E-05, """token""": 2_9_4_1, """token_str""": """ Te"""}, {"""sequence""": """My name is Clara""", """score""": 2E-05, """token""": 1_3_6_0_6, """token_str""": """ Clara"""}, ] , ) __SCREAMING_SNAKE_CASE = unmasker("""My name is <mask> <mask>""" , top_k=2) self.assertEqual( nested_simplify(lowerCAmelCase__ , decimals=6) , [ [ { """score""": 2.2E-05, """token""": 3_5_6_7_6, """token_str""": """ Maul""", """sequence""": """<s>My name is Maul<mask></s>""", }, {"""score""": 2.2E-05, """token""": 1_6_4_1_6, """token_str""": """ELS""", """sequence""": """<s>My name isELS<mask></s>"""}, ], [ { """score""": 2.2E-05, """token""": 3_5_6_7_6, """token_str""": """ Maul""", """sequence""": """<s>My name is<mask> Maul</s>""", }, {"""score""": 2.2E-05, """token""": 1_6_4_1_6, """token_str""": """ELS""", """sequence""": """<s>My name is<mask>ELS</s>"""}, ], ] , ) @require_torch_gpu def snake_case_ ( self): __SCREAMING_SNAKE_CASE = pipeline("""fill-mask""" , model="""hf-internal-testing/tiny-random-distilbert""" , device=0 , framework="""pt""") # convert model to fp16 pipe.model.half() __SCREAMING_SNAKE_CASE = pipe("""Paris is the [MASK] of France.""") # We actually don't care about the result, we just want to make sure # it works, meaning the float16 tensor got casted back to float32 # for postprocessing. self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__) @slow @require_torch def snake_case_ ( self): __SCREAMING_SNAKE_CASE = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""pt""") self.run_large_test(lowerCAmelCase__) @slow @require_tf def snake_case_ ( self): __SCREAMING_SNAKE_CASE = pipeline(task="""fill-mask""" , model="""distilroberta-base""" , top_k=2 , framework="""tf""") self.run_large_test(lowerCAmelCase__) def snake_case_ ( self , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = unmasker("""My name is <mask>""") self.assertEqual( nested_simplify(lowerCAmelCase__) , [ {"""sequence""": """My name is John""", """score""": 0.0_08, """token""": 6_1_0, """token_str""": """ John"""}, {"""sequence""": """My name is Chris""", """score""": 0.0_07, """token""": 1_5_7_3, """token_str""": """ Chris"""}, ] , ) __SCREAMING_SNAKE_CASE = unmasker("""The largest city in France is <mask>""") self.assertEqual( nested_simplify(lowerCAmelCase__) , [ { """sequence""": """The largest city in France is Paris""", """score""": 0.2_51, """token""": 2_2_0_1, """token_str""": """ Paris""", }, { """sequence""": """The largest city in France is Lyon""", """score""": 0.2_14, """token""": 1_2_7_9_0, """token_str""": """ Lyon""", }, ] , ) __SCREAMING_SNAKE_CASE = unmasker("""My name is <mask>""" , targets=[""" Patrick""", """ Clara""", """ Teven"""] , top_k=3) self.assertEqual( nested_simplify(lowerCAmelCase__) , [ {"""sequence""": """My name is Patrick""", """score""": 0.0_05, """token""": 3_4_9_9, """token_str""": """ Patrick"""}, {"""sequence""": """My name is Clara""", """score""": 0.0_00, """token""": 1_3_6_0_6, """token_str""": """ Clara"""}, {"""sequence""": """My name is Te""", """score""": 0.0_00, """token""": 2_9_4_1, """token_str""": """ Te"""}, ] , ) @require_torch def snake_case_ ( self): __SCREAMING_SNAKE_CASE = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""pt""") __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None self.run_pipeline_test(lowerCAmelCase__ , []) @require_tf def snake_case_ ( self): 
__SCREAMING_SNAKE_CASE = pipeline(task="""fill-mask""" , model="""sshleifer/tiny-distilroberta-base""" , framework="""tf""") __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = None self.run_pipeline_test(lowerCAmelCase__ , []) def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__): if tokenizer is None or tokenizer.mask_token_id is None: self.skipTest("""The provided tokenizer has no mask token, (probably reformer or wav2vec2)""") __SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = [ f"This is another {tokenizer.mask_token} test", ] return fill_masker, examples def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = fill_masker.tokenizer __SCREAMING_SNAKE_CASE = fill_masker.model __SCREAMING_SNAKE_CASE = fill_masker( f"This is a {tokenizer.mask_token}" , ) self.assertEqual( lowerCAmelCase__ , [ {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, ] , ) __SCREAMING_SNAKE_CASE = fill_masker([f"This is a {tokenizer.mask_token}"]) self.assertEqual( lowerCAmelCase__ , [ {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, ] , ) __SCREAMING_SNAKE_CASE = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."]) self.assertEqual( lowerCAmelCase__ , [ [ {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, ], [ {"""sequence""": 
ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, ], ] , ) with self.assertRaises(lowerCAmelCase__): fill_masker([None]) # No mask_token is not supported with self.assertRaises(lowerCAmelCase__): fill_masker("""This is""") self.run_test_top_k(lowerCAmelCase__ , lowerCAmelCase__) self.run_test_targets(lowerCAmelCase__ , lowerCAmelCase__) self.run_test_top_k_targets(lowerCAmelCase__ , lowerCAmelCase__) self.fill_mask_with_duplicate_targets_and_top_k(lowerCAmelCase__ , lowerCAmelCase__) self.fill_mask_with_multiple_masks(lowerCAmelCase__ , lowerCAmelCase__) def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = tokenizer.get_vocab() __SCREAMING_SNAKE_CASE = sorted(vocab.keys())[:2] # Pipeline argument __SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , targets=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = fill_masker(f"This is a {tokenizer.mask_token}") self.assertEqual( lowerCAmelCase__ , [ {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, ] , ) __SCREAMING_SNAKE_CASE = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} , lowerCAmelCase__) __SCREAMING_SNAKE_CASE = [tokenizer.decode([x]) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} , set(lowerCAmelCase__)) # Call argument __SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = fill_masker(f"This is a {tokenizer.mask_token}" , targets=lowerCAmelCase__) self.assertEqual( lowerCAmelCase__ , [ {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, ] , ) __SCREAMING_SNAKE_CASE = {vocab[el] for el in targets} self.assertEqual({el["""token"""] for el in outputs} , lowerCAmelCase__) __SCREAMING_SNAKE_CASE = [tokenizer.decode([x]) for x in target_ids] self.assertEqual({el["""token_str"""] for el in outputs} , set(lowerCAmelCase__)) # Score equivalence __SCREAMING_SNAKE_CASE = fill_masker(f"This is a {tokenizer.mask_token}" , targets=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = [top_mask["""token_str"""] for top_mask in outputs] __SCREAMING_SNAKE_CASE = [top_mask["""score"""] for top_mask in outputs] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(lowerCAmelCase__) == set(lowerCAmelCase__): __SCREAMING_SNAKE_CASE = fill_masker(f"This is a {tokenizer.mask_token}" , targets=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = [top_mask["""score"""] for top_mask in unmasked_targets] self.assertEqual(nested_simplify(lowerCAmelCase__) , nested_simplify(lowerCAmelCase__)) # Raises with invalid with self.assertRaises(lowerCAmelCase__): __SCREAMING_SNAKE_CASE = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[]) # For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised if "" not in tokenizer.get_vocab(): with self.assertRaises(lowerCAmelCase__): __SCREAMING_SNAKE_CASE = fill_masker(f"This is a {tokenizer.mask_token}" , targets=[""""""]) with self.assertRaises(lowerCAmelCase__): __SCREAMING_SNAKE_CASE = fill_masker(f"This is a {tokenizer.mask_token}" , targets="""""") def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , top_k=2) __SCREAMING_SNAKE_CASE = fill_masker(f"This is a {tokenizer.mask_token}") self.assertEqual( lowerCAmelCase__ , [ {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, ] , ) __SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2) self.assertEqual( lowerCAmelCase__ , [ {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, ] , ) self.assertEqual(nested_simplify(lowerCAmelCase__) , nested_simplify(lowerCAmelCase__)) def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = tokenizer.get_vocab() __SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__) # top_k=2, ntargets=3 __SCREAMING_SNAKE_CASE = sorted(vocab.keys())[:3] __SCREAMING_SNAKE_CASE = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=2 , targets=lowerCAmelCase__) # If we use the most probably targets, and filter differently, we should still # have the same results __SCREAMING_SNAKE_CASE = [el["""token_str"""] for el in sorted(lowerCAmelCase__ , key=lambda lowerCAmelCase__: x["score"] , reverse=lowerCAmelCase__)] # For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`. 
if set(lowerCAmelCase__).issubset(lowerCAmelCase__): __SCREAMING_SNAKE_CASE = fill_masker(f"This is a {tokenizer.mask_token}" , top_k=3 , targets=lowerCAmelCase__) # They should yield exactly the same result self.assertEqual(nested_simplify(lowerCAmelCase__) , nested_simplify(lowerCAmelCase__)) def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = tokenizer.get_vocab() # String duplicates + id duplicates __SCREAMING_SNAKE_CASE = sorted(vocab.keys())[:3] __SCREAMING_SNAKE_CASE = [targets[0], targets[1], targets[0], targets[2], targets[1]] __SCREAMING_SNAKE_CASE = fill_masker(f"My name is {tokenizer.mask_token}" , targets=lowerCAmelCase__ , top_k=1_0) # The target list contains duplicates, so we can't output more # than them self.assertEqual(len(lowerCAmelCase__) , 3) def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__): __SCREAMING_SNAKE_CASE = FillMaskPipeline(model=lowerCAmelCase__ , tokenizer=lowerCAmelCase__) __SCREAMING_SNAKE_CASE = fill_masker( f"This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}" , top_k=2) self.assertEqual( lowerCAmelCase__ , [ [ {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, ], [ {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, ], [ {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, {"""sequence""": ANY(lowerCAmelCase__), """score""": ANY(lowerCAmelCase__), """token""": ANY(lowerCAmelCase__), """token_str""": ANY(lowerCAmelCase__)}, ], ] , )
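# Hedged usage sketch of the behaviour these tests pin down, as a user would
# hit it. The checkpoint name is taken from the tests above; the exact tokens
# and scores are model-dependent.
from transformers import pipeline

_unmasker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2)
for _pred in _unmasker("My name is <mask>"):
    print(_pred["token_str"], round(_pred["score"], 6))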
"""simple docstring""" from ..utils import DummyObject, requires_backends class SCREAMING_SNAKE_CASE_ ( metaclass=__a ): """simple docstring""" __lowercase : Tuple = ['''keras_nlp'''] def __init__( self , *lowerCAmelCase__ , **lowerCAmelCase__): requires_backends(self , ["""keras_nlp"""])
"""simple docstring""" import argparse import os import evaluate import torch from datasets import load_dataset from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing the experiment tracking capability, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## a : int = 16 a : str = 32 def lowercase__(A , A = 16 ) ->Optional[Any]: """simple docstring""" lowercase__ : Tuple= AutoTokenizer.from_pretrained("bert-base-cased" ) lowercase__ : str= load_dataset("glue" , "mrpc" ) def tokenize_function(A ): # max_length=None => use the model max length (it's actually the default) lowercase__ : Any= tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=A , max_length=A ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): lowercase__ : Optional[int]= datasets.map( A , batched=A , remove_columns=["idx", "sentence1", "sentence2"] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library lowercase__ : List[str]= tokenized_datasets.rename_column("label" , "labels" ) def collate_fn(A ): # On TPU it's best to pad everything to the same length or training will be very slow. lowercase__ : List[Any]= 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": lowercase__ : Dict= 16 elif accelerator.mixed_precision != "no": lowercase__ : Dict= 8 else: lowercase__ : Optional[int]= None return tokenizer.pad( A , padding="longest" , max_length=A , pad_to_multiple_of=A , return_tensors="pt" , ) # Instantiate dataloaders. 
lowercase__ : List[Any]= DataLoader( tokenized_datasets["train"] , shuffle=A , collate_fn=A , batch_size=A ) lowercase__ : str= DataLoader( tokenized_datasets["validation"] , shuffle=A , collate_fn=A , batch_size=A ) return train_dataloader, eval_dataloader # For testing only if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1": from accelerate.test_utils.training import mocked_dataloaders a : Optional[Any] = mocked_dataloaders # noqa: F811 def lowercase__(A , A ) ->Tuple: """simple docstring""" if os.environ.get("TESTING_MOCKED_DATALOADERS" , A ) == "1": lowercase__ : List[Any]= 2 # Initialize Accelerator # New Code # # We pass in "all" to `log_with` to grab all available trackers in the environment # Note: If using a custom `Tracker` class, should be passed in here such as: # >>> log_with = ["all", MyCustomTrackerClassInstance()] if args.with_tracking: lowercase__ : Any= Accelerator( cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir ) else: lowercase__ : int= Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs lowercase__ : Any= config["lr"] lowercase__ : Tuple= int(config["num_epochs"] ) lowercase__ : Optional[Any]= int(config["seed"] ) lowercase__ : Optional[int]= int(config["batch_size"] ) set_seed(A ) lowercase__ : Tuple= get_dataloaders(A , A ) lowercase__ : str= evaluate.load("glue" , "mrpc" ) # If the batch size is too big we use gradient accumulation lowercase__ : List[str]= 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: lowercase__ : Tuple= batch_size // MAX_GPU_BATCH_SIZE lowercase__ : Dict= MAX_GPU_BATCH_SIZE # Instantiate the model (we build the model here so that the seed also control new weights initialization) lowercase__ : str= AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=A ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). lowercase__ : Optional[Any]= model.to(accelerator.device ) # Instantiate optimizer lowercase__ : int= AdamW(params=model.parameters() , lr=A ) # Instantiate scheduler lowercase__ : str= get_linear_schedule_with_warmup( optimizer=A , num_warmup_steps=100 , num_training_steps=(len(A ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. lowercase__ : Tuple= accelerator.prepare( A , A , A , A , A ) # New Code # # We need to initialize the trackers we use. Overall configurations can also be stored if args.with_tracking: lowercase__ : List[str]= os.path.split(A )[-1].split("." )[0] accelerator.init_trackers(A , A ) # Now we train the model for epoch in range(A ): model.train() # New Code # # For our tracking example, we will log the total loss of each epoch if args.with_tracking: lowercase__ : Union[str, Any]= 0 for step, batch in enumerate(A ): # We could avoid this line since we set the accelerator with `device_placement=True`. 
batch.to(accelerator.device ) lowercase__ : Any= model(**A ) lowercase__ : Union[str, Any]= outputs.loss # New Code # if args.with_tracking: total_loss += loss.detach().float() lowercase__ : Optional[int]= loss / gradient_accumulation_steps accelerator.backward(A ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(A ): # We could avoid this line since we set the accelerator with `device_placement=True` (the default). batch.to(accelerator.device ) with torch.no_grad(): lowercase__ : Optional[int]= model(**A ) lowercase__ : Dict= outputs.logits.argmax(dim=-1 ) lowercase__ : Dict= accelerator.gather_for_metrics((predictions, batch["labels"]) ) metric.add_batch( predictions=A , references=A , ) lowercase__ : Any= metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(f'''epoch {epoch}:''' , A ) # New Code # # To actually log, we call `Accelerator.log` # The values passed can be of `str`, `int`, `float` or `dict` of `str` to `float`/`int` if args.with_tracking: accelerator.log( { "accuracy": eval_metric["accuracy"], "f1": eval_metric["f1"], "train_loss": total_loss.item() / len(A ), "epoch": epoch, } , step=A , ) # New Code # # When a run is finished, you should call `accelerator.end_training()` # to close all of the open trackers if args.with_tracking: accelerator.end_training() def lowercase__() ->List[Any]: """simple docstring""" lowercase__ : List[Any]= argparse.ArgumentParser(description="Simple example of training script." ) parser.add_argument( "--mixed_precision" , type=A , default=A , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose" "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10." "and an Nvidia Ampere GPU." , ) parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." ) parser.add_argument( "--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , ) parser.add_argument( "--project_dir" , type=A , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , ) lowercase__ : Any= parser.parse_args() lowercase__ : Optional[Any]= {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16} training_function(A , A ) if __name__ == "__main__": main()
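# Hedged launch sketch: how an `accelerate` example like the one above is
# typically run. The script file name is an assumption; the flags are the
# ones defined by the argument parser above.
#
#   accelerate config                  # one-time interactive hardware setup
#   accelerate launch tracking_example.py --with_tracking --project_dir ./logs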
"""simple docstring""" import inspect import unittest import torch import torch.nn as nn from accelerate.hooks import ( AlignDevicesHook, ModelHook, SequentialHook, add_hook_to_module, attach_align_device_hook, remove_hook_from_module, remove_hook_from_submodules, ) from accelerate.test_utils import require_multi_gpu class __UpperCAmelCase( nn.Module ): """simple docstring""" def __init__( self ): '''simple docstring''' super().__init__() lowercase__ : Any= nn.Linear(3 , 4 ) lowercase__ : Tuple= nn.BatchNormad(4 ) lowercase__ : Dict= nn.Linear(4 , 5 ) def UpperCAmelCase_ ( self , snake_case__ ): '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(snake_case__ ) ) ) class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def UpperCAmelCase_ ( self , snake_case__ , *snake_case__ , **snake_case__ ): '''simple docstring''' return (args[0] + 1,) + args[1:], kwargs class __UpperCAmelCase( SCREAMING_SNAKE_CASE__ ): """simple docstring""" def UpperCAmelCase_ ( self , snake_case__ , snake_case__ ): '''simple docstring''' return output + 1 class __UpperCAmelCase( unittest.TestCase ): """simple docstring""" def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : str= ModelForTest() lowercase__ : str= ModelHook() add_hook_to_module(snake_case__ , snake_case__ ) self.assertEqual(test_model._hf_hook , snake_case__ ) self.assertTrue(hasattr(snake_case__ , "_old_forward" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , "forward" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] ) remove_hook_from_module(snake_case__ ) self.assertFalse(hasattr(snake_case__ , "_hf_hook" ) ) self.assertFalse(hasattr(snake_case__ , "_old_forward" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= ModelForTest() lowercase__ : int= ModelHook() add_hook_to_module(snake_case__ , snake_case__ ) add_hook_to_module(snake_case__ , snake_case__ , append=snake_case__ ) self.assertEqual(isinstance(test_model._hf_hook , snake_case__ ) , snake_case__ ) self.assertEqual(len(test_model._hf_hook.hooks ) , 2 ) self.assertTrue(hasattr(snake_case__ , "_old_forward" ) ) # Check adding the hook did not change the name or the signature self.assertEqual(test_model.forward.__name__ , "forward" ) self.assertListEqual(list(inspect.signature(test_model.forward ).parameters ) , ["x"] ) remove_hook_from_module(snake_case__ ) self.assertFalse(hasattr(snake_case__ , "_hf_hook" ) ) self.assertFalse(hasattr(snake_case__ , "_old_forward" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= ModelForTest() lowercase__ : int= torch.randn(2 , 3 ) lowercase__ : Optional[Any]= test_model(x + 1 ) lowercase__ : Tuple= test_model(x + 2 ) lowercase__ : str= PreForwardHook() add_hook_to_module(snake_case__ , snake_case__ ) lowercase__ : Tuple= test_model(snake_case__ ) self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain lowercase__ : Tuple= PreForwardHook() add_hook_to_module(snake_case__ , snake_case__ ) lowercase__ : Optional[Any]= test_model(snake_case__ ) self.assertTrue(torch.allclose(snake_case__ , snake_case__ , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks lowercase__ : List[str]= SequentialHook(PreForwardHook() , PreForwardHook() ) add_hook_to_module(snake_case__ , snake_case__ ) lowercase__ : Dict= test_model(snake_case__ ) assert 
torch.allclose(snake_case__ , snake_case__ , atol=1e-5 ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Tuple= ModelForTest() lowercase__ : Optional[int]= torch.randn(2 , 3 ) lowercase__ : Optional[int]= test_model(snake_case__ ) lowercase__ : str= PostForwardHook() add_hook_to_module(snake_case__ , snake_case__ ) lowercase__ : Optional[int]= test_model(snake_case__ ) self.assertTrue(torch.allclose(snake_case__ , output + 1 , atol=1e-5 ) ) # Attaching a hook to a model when it already has one replaces, does not chain lowercase__ : Tuple= PostForwardHook() add_hook_to_module(snake_case__ , snake_case__ ) lowercase__ : Dict= test_model(snake_case__ ) self.assertTrue(torch.allclose(snake_case__ , output + 1 , atol=1e-5 ) ) # You need to use the sequential hook to chain two or more hooks lowercase__ : Optional[Any]= SequentialHook(PostForwardHook() , PostForwardHook() ) add_hook_to_module(snake_case__ , snake_case__ ) lowercase__ : List[str]= test_model(snake_case__ ) assert torch.allclose(snake_case__ , output + 2 , atol=1e-5 ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : int= ModelForTest() lowercase__ : Optional[Any]= torch.randn(2 , 3 ) lowercase__ : int= test_model(snake_case__ ) lowercase__ : Union[str, Any]= PostForwardHook() add_hook_to_module(snake_case__ , snake_case__ ) lowercase__ : Dict= test_model(snake_case__ ) self.assertTrue(torch.allclose(snake_case__ , output + 1 ) ) self.assertTrue(outputa.requires_grad ) lowercase__ : Any= True lowercase__ : Optional[int]= test_model(snake_case__ ) self.assertFalse(outputa.requires_grad ) @require_multi_gpu def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # This will move each submodule on different devices add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(execution_device=0 ) ) add_hook_to_module(model.lineara , AlignDevicesHook(execution_device=1 ) ) self.assertEqual(model.lineara.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.weight.device , torch.device(0 ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device(0 ) ) self.assertEqual(model.lineara.weight.device , torch.device(1 ) ) # We can still make a forward pass. The input does not need to be on any particular device lowercase__ : int= torch.randn(2 , 3 ) lowercase__ : List[str]= model(snake_case__ ) self.assertEqual(output.device , torch.device(1 ) ) # We can add a general hook to put back output on same device as input. 
add_hook_to_module(snake_case__ , AlignDevicesHook(io_same_device=snake_case__ ) ) lowercase__ : Tuple= torch.randn(2 , 3 ).to(0 ) lowercase__ : Optional[Any]= model(snake_case__ ) self.assertEqual(output.device , torch.device(0 ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Optional[Any]= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # This will move each submodule on different devices lowercase__ : Optional[int]= {"execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True} add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case__ ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**snake_case__ ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case__ ) ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) # Buffers are not included in the offload by default, so are on the execution device lowercase__ : Optional[int]= torch.device(hook_kwargs["execution_device"] ) self.assertEqual(model.batchnorm.running_mean.device , snake_case__ ) lowercase__ : List[Any]= torch.randn(2 , 3 ) lowercase__ : str= model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # Now test with buffers included in the offload lowercase__ : Optional[int]= { "execution_device": 0 if torch.cuda.is_available() else "cpu", "offload": True, "offload_buffers": True, } add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case__ ) ) add_hook_to_module(model.batchnorm , AlignDevicesHook(**snake_case__ ) ) add_hook_to_module(model.lineara , AlignDevicesHook(**snake_case__ ) ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) ) lowercase__ : str= torch.randn(2 , 3 ) lowercase__ : str= model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. 
remove_hook_from_module(model.lineara ) remove_hook_from_module(model.batchnorm ) remove_hook_from_module(model.lineara ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Dict= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # This will move each submodule on different devices lowercase__ : str= 0 if torch.cuda.is_available() else "cpu" attach_align_device_hook(snake_case__ , execution_device=snake_case__ , offload=snake_case__ ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) # Buffers are not included in the offload by default, so are on the execution device lowercase__ : Dict= torch.device(snake_case__ ) self.assertEqual(model.batchnorm.running_mean.device , snake_case__ ) lowercase__ : Optional[Any]= torch.randn(2 , 3 ) lowercase__ : List[Any]= model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(snake_case__ ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # Now test with buffers included in the offload attach_align_device_hook(snake_case__ , execution_device=snake_case__ , offload=snake_case__ , offload_buffers=snake_case__ ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) ) lowercase__ : List[str]= torch.randn(2 , 3 ) lowercase__ : List[Any]= model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. 
remove_hook_from_submodules(snake_case__ ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) def UpperCAmelCase_ ( self ): '''simple docstring''' lowercase__ : Any= ModelForTest() # Everything is on CPU self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # This will move each submodule on different devices lowercase__ : Optional[Any]= 0 if torch.cuda.is_available() else "cpu" attach_align_device_hook( snake_case__ , execution_device=snake_case__ , offload=snake_case__ , weights_map=model.state_dict() ) # Parameters have been offloaded, so on the meta device self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) # Buffers are not included in the offload by default, so are on the execution device lowercase__ : Tuple= torch.device(snake_case__ ) self.assertEqual(model.batchnorm.running_mean.device , snake_case__ ) lowercase__ : str= torch.randn(2 , 3 ) lowercase__ : List[Any]= model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(snake_case__ ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) # Now test with buffers included in the offload attach_align_device_hook( snake_case__ , execution_device=snake_case__ , offload=snake_case__ , weights_map=model.state_dict() , offload_buffers=snake_case__ , ) # Parameters have been offloaded, so on the meta device, buffers included self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("meta" ) ) self.assertEqual(model.lineara.weight.device , torch.device("meta" ) ) self.assertEqual(model.batchnorm.running_mean.device , torch.device("meta" ) ) lowercase__ : Dict= torch.randn(2 , 3 ) lowercase__ : List[str]= model(snake_case__ ) self.assertEqual(output.device , snake_case__ ) # Removing hooks loads back the weights in the model. remove_hook_from_submodules(snake_case__ ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) ) self.assertEqual(model.batchnorm.weight.device , torch.device("cpu" ) ) self.assertEqual(model.lineara.weight.device , torch.device("cpu" ) )
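# Hedged usage sketch of the hook API exercised above: attach a post-forward
# hook, observe the +1 it applies to the module output, then detach it. Only
# `accelerate.hooks` calls already imported by the tests are used.
import torch
import torch.nn as nn
from accelerate.hooks import ModelHook, add_hook_to_module, remove_hook_from_module


class _AddOneHook(ModelHook):
    def post_forward(self, module, output):
        return output + 1


_layer = nn.Linear(3, 4)
_x = torch.randn(2, 3)
_base = _layer(_x)
add_hook_to_module(_layer, _AddOneHook())
assert torch.allclose(_layer(_x), _base + 1, atol=1e-5)
remove_hook_from_module(_layer)  # restores the original forward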
"""simple docstring""" from typing import List, Optional, Union from ...image_utils import ImageInput from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy from ...utils import TensorType class __lowerCamelCase ( A__ ): '''simple docstring''' a_ : str = ["""image_processor""", """tokenizer"""] a_ : int = """BlipImageProcessor""" a_ : Union[str, Any] = ("""BertTokenizer""", """BertTokenizerFast""") def __init__( self : List[Any] , a_ : List[Any] , a_ : int ): lowerCAmelCase_ : Union[str, Any] = False super().__init__(a_ , a_ ) lowerCAmelCase_ : Any = self.image_processor def __call__( self : int , a_ : ImageInput = None , a_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a_ : bool = True , a_ : Union[bool, str, PaddingStrategy] = False , a_ : Union[bool, str, TruncationStrategy] = None , a_ : Optional[int] = None , a_ : int = 0 , a_ : Optional[int] = None , a_ : Optional[bool] = None , a_ : bool = False , a_ : bool = False , a_ : bool = False , a_ : bool = False , a_ : bool = False , a_ : bool = True , a_ : Optional[Union[str, TensorType]] = None , **a_ : Dict , ): if images is None and text is None: raise ValueError("You have to specify either images or text." ) # Get only text if images is None: lowerCAmelCase_ : Tuple = self.tokenizer lowerCAmelCase_ : int = self.tokenizer( text=a_ , add_special_tokens=a_ , padding=a_ , truncation=a_ , max_length=a_ , stride=a_ , pad_to_multiple_of=a_ , return_attention_mask=a_ , return_overflowing_tokens=a_ , return_special_tokens_mask=a_ , return_offsets_mapping=a_ , return_token_type_ids=a_ , return_length=a_ , verbose=a_ , return_tensors=a_ , **a_ , ) return text_encoding # add pixel_values lowerCAmelCase_ : Union[str, Any] = self.image_processor(a_ , return_tensors=a_ ) if text is not None: lowerCAmelCase_ : List[str] = self.tokenizer( text=a_ , add_special_tokens=a_ , padding=a_ , truncation=a_ , max_length=a_ , stride=a_ , pad_to_multiple_of=a_ , return_attention_mask=a_ , return_overflowing_tokens=a_ , return_special_tokens_mask=a_ , return_offsets_mapping=a_ , return_token_type_ids=a_ , return_length=a_ , verbose=a_ , return_tensors=a_ , **a_ , ) else: lowerCAmelCase_ : Optional[int] = None if text_encoding is not None: encoding_image_processor.update(a_ ) return encoding_image_processor def lowerCamelCase ( self : Union[str, Any] , *a_ : Tuple , **a_ : int ): return self.tokenizer.batch_decode(*a_ , **a_ ) def lowerCamelCase ( self : Optional[Any] , *a_ : Any , **a_ : int ): return self.tokenizer.decode(*a_ , **a_ ) @property def lowerCamelCase ( self : List[str] ): lowerCAmelCase_ : Optional[int] = self.tokenizer.model_input_names lowerCAmelCase_ : Optional[int] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
"""simple docstring""" import copy from typing import Dict, Optional from ...configuration_utils import PretrainedConfig from ...utils import logging from ..auto import CONFIG_MAPPING from ..detr import DetrConfig from ..swin import SwinConfig lowercase__ = { """facebook/maskformer-swin-base-ade""": ( """https://huggingface.co/facebook/maskformer-swin-base-ade/blob/main/config.json""" ) # See all MaskFormer models at https://huggingface.co/models?filter=maskformer } lowercase__ = logging.get_logger(__name__) class __lowerCamelCase ( A__ ): '''simple docstring''' a_ : Optional[int] = """maskformer""" a_ : Optional[int] = {"""hidden_size""": """mask_feature_size"""} a_ : Optional[int] = ["""resnet""", """swin"""] a_ : int = ["""detr"""] def __init__( self : str , a_ : int = 2_56 , a_ : int = 2_56 , a_ : float = 0.1 , a_ : bool = False , a_ : Optional[Dict] = None , a_ : Optional[Dict] = None , a_ : float = 0.02 , a_ : float = 1.0 , a_ : float = 1.0 , a_ : float = 1.0 , a_ : float = 20.0 , a_ : Optional[bool] = None , **a_ : str , ): if backbone_config is None: # fall back to https://huggingface.co/microsoft/swin-base-patch4-window12-384-in22k lowerCAmelCase_ : Tuple = SwinConfig( image_size=3_84 , in_channels=3 , patch_size=4 , embed_dim=1_28 , depths=[2, 2, 18, 2] , num_heads=[4, 8, 16, 32] , window_size=12 , drop_path_rate=0.3 , out_features=["stage1", "stage2", "stage3", "stage4"] , ) if isinstance(a_ , a_ ): lowerCAmelCase_ : Optional[Any] = backbone_config.pop("model_type" ) lowerCAmelCase_ : Any = CONFIG_MAPPING[backbone_model_type] lowerCAmelCase_ : str = config_class.from_dict(a_ ) # verify that the backbone is supported if backbone_config.model_type not in self.backbones_supported: logger.warning_once( f'''Backbone {backbone_config.model_type} is not a supported model and may not be compatible with MaskFormer. 
''' f'''Supported model types: {",".join(self.backbones_supported )}''' ) if decoder_config is None: # fall back to https://huggingface.co/facebook/detr-resnet-50 lowerCAmelCase_ : Union[str, Any] = DetrConfig() else: # verify that the decoder is supported lowerCAmelCase_ : Optional[Any] = ( decoder_config.pop("model_type" ) if isinstance(a_ , a_ ) else decoder_config.model_type ) if decoder_type not in self.decoders_supported: raise ValueError( f'''Transformer Decoder {decoder_type} not supported, please use one of''' f''' {",".join(self.decoders_supported )}''' ) if isinstance(a_ , a_ ): lowerCAmelCase_ : Optional[int] = CONFIG_MAPPING[decoder_type] lowerCAmelCase_ : List[Any] = config_class.from_dict(a_ ) lowerCAmelCase_ : str = backbone_config lowerCAmelCase_ : Tuple = decoder_config # main feature dimension for the model lowerCAmelCase_ : str = fpn_feature_size lowerCAmelCase_ : str = mask_feature_size # initializer lowerCAmelCase_ : List[Any] = init_std lowerCAmelCase_ : Tuple = init_xavier_std # Hungarian matcher && loss lowerCAmelCase_ : int = cross_entropy_weight lowerCAmelCase_ : Dict = dice_weight lowerCAmelCase_ : int = mask_weight lowerCAmelCase_ : Any = use_auxiliary_loss lowerCAmelCase_ : Dict = no_object_weight lowerCAmelCase_ : Optional[int] = output_auxiliary_logits lowerCAmelCase_ : int = self.decoder_config.encoder_attention_heads lowerCAmelCase_ : str = self.decoder_config.num_hidden_layers super().__init__(**a_ ) @classmethod def lowerCamelCase ( cls : int , a_ : PretrainedConfig , a_ : PretrainedConfig , **a_ : Tuple ): return cls( backbone_config=a_ , decoder_config=a_ , **a_ , ) def lowerCamelCase ( self : Any ): lowerCAmelCase_ : Optional[int] = copy.deepcopy(self.__dict__ ) lowerCAmelCase_ : Optional[Any] = self.backbone_config.to_dict() lowerCAmelCase_ : Union[str, Any] = self.decoder_config.to_dict() lowerCAmelCase_ : List[str] = self.__class__.model_type return output
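# Hedged usage sketch: building the configuration above from an explicit Swin
# backbone, matching the fallback documented in `__init__`. Assumes the class
# is exposed as `transformers.MaskFormerConfig` and accepts a config instance.
from transformers import MaskFormerConfig, SwinConfig

_backbone = SwinConfig(out_features=["stage1", "stage2", "stage3", "stage4"])
_config = MaskFormerConfig(backbone_config=_backbone)
print(_config.mask_feature_size)  # 256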
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Check that ``n`` does not already appear in the row, column or 3x3 box."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty cell, or ``None`` if the grid is full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Fill the grid in place by backtracking; return it, or ``None`` if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid
    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0  # undo and backtrack
    return None


def print_solution(grid: Matrix) -> None:
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
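# Minimal self-check (added illustration, not in the original file): one
# backtracking step on a nearly empty grid. With only a 3 at (0, 0), the first
# empty cell is (0, 1); digit 1 passes the row/column/box checks, while 3 fails
# because a 3 already sits in row 0.
_demo_grid: Matrix = [[0] * 9 for _ in range(9)]
_demo_grid[0][0] = 3
assert find_empty_location(_demo_grid) == (0, 1)
assert is_safe(_demo_grid, 0, 1, 1)
assert not is_safe(_demo_grid, 0, 1, 3)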
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple

import sentencepiece as spm

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model",
        "xlm-roberta-large-finetuned-conll02-dutch": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll02-spanish": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-english": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model"
        ),
        "xlm-roberta-large-finetuned-conll03-german": (
            "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model"
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlm-roberta-base": 512,
    "xlm-roberta-large": 512,
    "xlm-roberta-large-finetuned-conll02-dutch": 512,
    "xlm-roberta-large-finetuned-conll02-spanish": 512,
    "xlm-roberta-large-finetuned-conll03-english": 512,
    "xlm-roberta-large-finetuned-conll03-german": 512,
}


class XLMRobertaTokenizer(PreTrainedTokenizer):
    """Construct an XLM-RoBERTa tokenizer, based on a SentencePiece BPE model."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file

        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'

        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}

        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1

        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + self.fairseq_offset
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        state["sp_model_proto"] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Build model inputs from sequences: `<s> A </s>` or `<s> A </s></s> B </s>`."""
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """XLM-RoBERTa does not use token type ids, so a list of zeros is returned."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.sp_model) + self.fairseq_offset + 1  # Add the <mask> token

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)

        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings for sub-words) to a single string."""
        out_string = "".join(tokens).replace(SPIECE_UNDERLINE, " ").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
191
0
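# Illustrative sketch (not from the record above): the tokenizer's
# fairseq/sentencepiece alignment is subtle. SentencePiece reserves id 0 for
# <unk>, while fairseq places <s>/<pad>/</s>/<unk> at ids 0-3, so every
# sentencepiece id is shifted by an offset of 1 and <mask> is appended after
# the shifted vocabulary. The toy vocabulary and the token_to_id helper below
# are illustrative stand-ins, not the real model's ids.

fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
fairseq_offset = 1  # spm id 3 (",") must land on fairseq id 4

toy_spm_vocab = {"<unk>": 0, "<s>": 1, "</s>": 2, ",": 3}


def spm_piece_to_id(piece: str) -> int:
    # sentencepiece convention: unknown pieces map to id 0
    return toy_spm_vocab.get(piece, 0)


def token_to_id(token: str) -> int:
    """Mirror of _convert_token_to_id in the tokenizer above, on the toy vocab."""
    if token in fairseq_tokens_to_ids:
        return fairseq_tokens_to_ids[token]
    spm_id = spm_piece_to_id(token)
    # 0 from sentencepiece means "unknown", which must map to fairseq's <unk>
    return spm_id + fairseq_offset if spm_id else fairseq_tokens_to_ids["<unk>"]


assert token_to_id(",") == 4  # shifted by the offset
assert token_to_id("oov") == 3  # unknown falls back to <unk>
assert token_to_id("<pad>") == 1  # special tokens bypass sentencepiece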
import argparse

import torch

from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--base_model", action="store_true", help="Whether you want just the base model (no decoder) or not."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
    )
21
from datetime import datetime

import matplotlib.pyplot as plt
import torch


def freeze_params(module):
    """Disable gradient updates for every parameter of ``module``."""
    for param in module.parameters():
        param.requires_grad = False


def get_device():
    """Pick the best available torch device, warning about known MPS issues."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image):
    """Display ``image`` with matplotlib, hiding both axes."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp():
    """Return the current wall-clock time formatted as ``HH:MM:SS``."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
311
0
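# Illustrative check (not from the records above) of the freeze_params pattern
# used in the utility file: after freezing, no parameter is trainable. The
# nn.Linear layer is just an arbitrary example module.
from torch import nn


def freeze_params(module: nn.Module) -> None:
    # same idea as the helper above: detach every parameter from autograd
    for param in module.parameters():
        param.requires_grad = False


layer = nn.Linear(4, 2)
freeze_params(layer)
assert all(not p.requires_grad for p in layer.parameters())
assert sum(p.numel() for p in layer.parameters() if p.requires_grad) == 0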
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowercase_ = logging.get_logger(__name__) lowercase_ = { "xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json", "xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json", "xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json", "xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json", "xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json", "xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json", "xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json", "xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json", "xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json", "xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json", } class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCAmelCase : List[str] = 'xlm' __UpperCAmelCase : int = { 'hidden_size': 'emb_dim', 'num_attention_heads': 'n_heads', 'num_hidden_layers': 'n_layers', 'n_words': 'vocab_size', # For backward compatibility } def __init__( self , _a=30_145 , _a=2_048 , _a=12 , _a=16 , _a=0.1 , _a=0.1 , _a=True , _a=False , _a=False , _a=False , _a=1 , _a=True , _a=512 , _a=2_048**-0.5 , _a=1E-12 , _a=0.02 , _a=0 , _a=1 , _a=2 , _a=3 , _a=5 , _a=True , _a="first" , _a=True , _a=None , _a=True , _a=0.1 , _a=5 , _a=5 , _a=0 , _a=0 , _a=2 , _a=0 , **_a , ): __a = vocab_size __a = emb_dim __a = n_layers __a = n_heads __a = dropout __a = attention_dropout __a = gelu_activation __a = sinusoidal_embeddings __a = causal __a = asm __a = n_langs __a = use_lang_emb __a = layer_norm_eps __a = bos_index __a = eos_index __a = pad_index __a = unk_index __a = mask_index __a = is_encoder __a = max_position_embeddings __a = embed_init_std __a = init_std __a = summary_type __a = summary_use_proj __a = summary_activation __a = summary_proj_to_labels __a = summary_first_dropout __a = start_n_top __a = end_n_top __a = mask_token_id __a = lang_id if "n_words" in kwargs: __a = kwargs['''n_words'''] super().__init__(pad_token_id=_a , bos_token_id=_a , **_a ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ): '''simple docstring''' @property def __UpperCAmelCase ( self ): if self.task == "multiple-choice": __a = {0: '''batch''', 1: '''choice''', 2: '''sequence'''} else: __a = {0: '''batch''', 1: '''sequence'''} return OrderedDict( [ ('''input_ids''', dynamic_axis), ('''attention_mask''', dynamic_axis), ('''token_type_ids''', dynamic_axis), ] )
11
"""simple docstring""" import math from collections import defaultdict from typing import List, Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput def lowercase ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any]=0.9_99 , lowerCAmelCase__ : List[str]="cosine" , ) -> Optional[int]: if alpha_transform_type == "cosine": def alpha_bar_fn(lowerCAmelCase__ : int ): return math.cos((t + 0.0_08) / 1.0_08 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(lowerCAmelCase__ : Optional[Any] ): return math.exp(t * -12.0 ) else: raise ValueError(f'''Unsupported alpha_tranform_type: {alpha_transform_type}''' ) __a = [] for i in range(lowerCAmelCase__ ): __a = i / num_diffusion_timesteps __a = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(lowerCAmelCase__ ) / alpha_bar_fn(lowerCAmelCase__ ) , lowerCAmelCase__ ) ) return torch.tensor(lowerCAmelCase__ , dtype=torch.floataa ) class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): '''simple docstring''' __UpperCAmelCase : Tuple = [e.name for e in KarrasDiffusionSchedulers] __UpperCAmelCase : str = 2 @register_to_config def __init__( self , _a = 1_000 , _a = 0.0_0085 , _a = 0.012 , _a = "linear" , _a = None , _a = "epsilon" , _a = "linspace" , _a = 0 , ): if trained_betas is not None: __a = torch.tensor(_a , dtype=torch.floataa ) elif beta_schedule == "linear": __a = torch.linspace(_a , _a , _a , dtype=torch.floataa ) elif beta_schedule == "scaled_linear": # this schedule is very specific to the latent diffusion model. __a = ( torch.linspace(beta_start**0.5 , beta_end**0.5 , _a , dtype=torch.floataa ) ** 2 ) elif beta_schedule == "squaredcos_cap_v2": # Glide cosine schedule __a = betas_for_alpha_bar(_a ) else: raise NotImplementedError(f'''{beta_schedule} does is not implemented for {self.__class__}''' ) __a = 1.0 - self.betas __a = torch.cumprod(self.alphas , dim=0 ) # set all values self.set_timesteps(_a , _a , _a ) def __UpperCAmelCase ( self , _a , _a=None ): if schedule_timesteps is None: __a = self.timesteps __a = (schedule_timesteps == timestep).nonzero() # The sigma index that is taken for the **very** first `step` # is always the second index (or the last index if there is only 1) # This way we can ensure we don't accidentally skip a sigma in # case we start in the middle of the denoising schedule (e.g. for image-to-image) if len(self._index_counter ) == 0: __a = 1 if len(_a ) > 1 else 0 else: __a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep __a = self._index_counter[timestep_int] return indices[pos].item() @property def __UpperCAmelCase ( self ): # standard deviation of the initial noise distribution if self.config.timestep_spacing in ["linspace", "trailing"]: return self.sigmas.max() return (self.sigmas.max() ** 2 + 1) ** 0.5 def __UpperCAmelCase ( self , _a , _a , ): __a = self.index_for_timestep(_a ) if self.state_in_first_order: __a = self.sigmas[step_index] else: __a = self.sigmas_interpol[step_index] __a = sample / ((sigma**2 + 1) ** 0.5) return sample def __UpperCAmelCase ( self , _a , _a = None , _a = None , ): __a = num_inference_steps __a = num_train_timesteps or self.config.num_train_timesteps # "linspace", "leading", "trailing" corresponds to annotation of Table 2. 
of https://arxiv.org/abs/2305.08891 if self.config.timestep_spacing == "linspace": __a = np.linspace(0 , num_train_timesteps - 1 , _a , dtype=_a )[::-1].copy() elif self.config.timestep_spacing == "leading": __a = num_train_timesteps // self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __a = (np.arange(0 , _a ) * step_ratio).round()[::-1].copy().astype(_a ) timesteps += self.config.steps_offset elif self.config.timestep_spacing == "trailing": __a = num_train_timesteps / self.num_inference_steps # creates integer timesteps by multiplying by ratio # casting to int to avoid issues when num_inference_step is power of 3 __a = (np.arange(_a , 0 , -step_ratio )).round().copy().astype(_a ) timesteps -= 1 else: raise ValueError( f'''{self.config.timestep_spacing} is not supported. Please make sure to choose one of \'linspace\', \'leading\' or \'trailing\'.''' ) __a = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 ) __a = torch.from_numpy(np.log(_a ) ).to(_a ) __a = np.interp(_a , np.arange(0 , len(_a ) ) , _a ) __a = np.concatenate([sigmas, [0.0]] ).astype(np.floataa ) __a = torch.from_numpy(_a ).to(device=_a ) # interpolate sigmas __a = sigmas.log().lerp(sigmas.roll(1 ).log() , 0.5 ).exp() __a = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2 ), sigmas[-1:]] ) __a = torch.cat( [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2 ), sigmas_interpol[-1:]] ) if str(_a ).startswith('''mps''' ): # mps does not support float64 __a = torch.from_numpy(_a ).to(_a , dtype=torch.floataa ) else: __a = torch.from_numpy(_a ).to(_a ) # interpolate timesteps __a = self.sigma_to_t(_a ).to(_a , dtype=timesteps.dtype ) __a = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]) , dim=-1 ).flatten() __a = torch.cat([timesteps[:1], interleaved_timesteps] ) __a = None # for exp beta schedules, such as the one for `pipeline_shap_e.py` # we need an index counter __a = defaultdict(_a ) def __UpperCAmelCase ( self , _a ): # get log sigma __a = sigma.log() # get distribution __a = log_sigma - self.log_sigmas[:, None] # get sigmas range __a = dists.ge(0 ).cumsum(dim=0 ).argmax(dim=0 ).clamp(max=self.log_sigmas.shape[0] - 2 ) __a = low_idx + 1 __a = self.log_sigmas[low_idx] __a = self.log_sigmas[high_idx] # interpolate sigmas __a = (low - log_sigma) / (low - high) __a = w.clamp(0 , 1 ) # transform interpolation to time range __a = (1 - w) * low_idx + w * high_idx __a = t.view(sigma.shape ) return t @property def __UpperCAmelCase ( self ): return self.sample is None def __UpperCAmelCase ( self , _a , _a , _a , _a = True , ): __a = self.index_for_timestep(_a ) # advance index counter by 1 __a = timestep.cpu().item() if torch.is_tensor(_a ) else timestep self._index_counter[timestep_int] += 1 if self.state_in_first_order: __a = self.sigmas[step_index] __a = self.sigmas_interpol[step_index + 1] __a = self.sigmas[step_index + 1] else: # 2nd order / KDPM2's method __a = self.sigmas[step_index - 1] __a = self.sigmas_interpol[step_index] __a = self.sigmas[step_index] # currently only gamma=0 is supported. This usually works best anyways. # We can support gamma in the future but then need to scale the timestep before # passing it to the model which requires a change in API __a = 0 __a = sigma * (gamma + 1) # Note: sigma_hat == sigma for now # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise if self.config.prediction_type == "epsilon": __a = sigma_hat if self.state_in_first_order else sigma_interpol __a = sample - sigma_input * model_output elif self.config.prediction_type == "v_prediction": __a = sigma_hat if self.state_in_first_order else sigma_interpol __a = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + ( sample / (sigma_input**2 + 1) ) elif self.config.prediction_type == "sample": raise NotImplementedError('''prediction_type not implemented yet: sample''' ) else: raise ValueError( f'''prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`''' ) if self.state_in_first_order: # 2. Convert to an ODE derivative for 1st order __a = (sample - pred_original_sample) / sigma_hat # 3. delta timestep __a = sigma_interpol - sigma_hat # store for 2nd order step __a = sample else: # DPM-Solver-2 # 2. Convert to an ODE derivative for 2nd order __a = (sample - pred_original_sample) / sigma_interpol # 3. delta timestep __a = sigma_next - sigma_hat __a = self.sample __a = None __a = sample + derivative * dt if not return_dict: return (prev_sample,) return SchedulerOutput(prev_sample=_a ) def __UpperCAmelCase ( self , _a , _a , _a , ): # Make sure sigmas and timesteps have the same device and dtype as original_samples __a = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype ) if original_samples.device.type == "mps" and torch.is_floating_point(_a ): # mps does not support float64 __a = self.timesteps.to(original_samples.device , dtype=torch.floataa ) __a = timesteps.to(original_samples.device , dtype=torch.floataa ) else: __a = self.timesteps.to(original_samples.device ) __a = timesteps.to(original_samples.device ) __a = [self.index_for_timestep(_a , _a ) for t in timesteps] __a = sigmas[step_indices].flatten() while len(sigma.shape ) < len(original_samples.shape ): __a = sigma.unsqueeze(-1 ) __a = original_samples + noise * sigma return noisy_samples def __len__( self ): return self.config.num_train_timesteps
11
1
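# Standalone sketch of the "squaredcos_cap_v2" (cosine) beta schedule that
# betas_for_alpha_bar builds in the scheduler above, using only the standard
# library. The 0.999 clip matches that function's default max_beta; the
# function and variable names here are illustrative, not the library's API.
import math


def cosine_betas(num_diffusion_timesteps: int, max_beta: float = 0.999) -> list:
    def alpha_bar(t: float) -> float:
        # cumulative signal level at time t for the cosine schedule
        return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        # beta_i is chosen so that alpha_bar decays exactly along the cosine curve
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return betas


betas = cosine_betas(1000)
assert len(betas) == 1000
assert 0 < betas[0] < betas[-1] <= 0.999  # noise grows toward the end of the schedule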
'''simple docstring''' import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() @property def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" A : Optional[int] = 1 A : str = 3 A : str = (32, 32) A : int = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE__ ) return image @property def __lowerCAmelCase ( self ) -> List[str]: """simple docstring""" torch.manual_seed(0 ) A : int = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) return model @property def __lowerCAmelCase ( self ) -> int: """simple docstring""" torch.manual_seed(0 ) A : Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) return model @property def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" torch.manual_seed(0 ) A : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModel(SCREAMING_SNAKE_CASE__ ) @property def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" def extract(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE ): class A : def __init__( self ) -> str: """simple docstring""" A : str = torch.ones([0] ) def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE ) -> Optional[Any]: """simple docstring""" self.pixel_values.to(SCREAMING_SNAKE_CASE__ ) return self return Out() return extract def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : List[Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator A : Tuple = self.dummy_cond_unet A : List[str] = DDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=SCREAMING_SNAKE_CASE__ , set_alpha_to_one=SCREAMING_SNAKE_CASE__ , ) A : Optional[Any] = self.dummy_vae A : List[str] = self.dummy_text_encoder A : Tuple = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # make sure here that pndm scheduler skips prk A : List[Any] = StableDiffusionPipeline( unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , vae=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=self.dummy_extractor , ) A : Optional[Any] = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) A : str = '''A painting of a squirrel eating a burger''' A : str = 
torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 ) A : Optional[Any] = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' ) A : Dict = output.images A : Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 ) A : Optional[int] = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=SCREAMING_SNAKE_CASE__ , )[0] A : Any = image[0, -3:, -3:, -1] A : List[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A : Optional[int] = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" A : List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator A : int = self.dummy_cond_unet A : Union[str, Any] = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE__ ) A : List[Any] = self.dummy_vae A : Optional[Any] = self.dummy_text_encoder A : Union[str, Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # make sure here that pndm scheduler skips prk A : Any = StableDiffusionPipeline( unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , vae=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=self.dummy_extractor , ) A : str = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) A : Union[str, Any] = '''A painting of a squirrel eating a burger''' A : Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 ) A : Tuple = sd_pipe([prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' ) A : Optional[Any] = output.images A : str = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(0 ) A : Optional[Any] = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=SCREAMING_SNAKE_CASE__ , )[0] A : Optional[int] = image[0, -3:, -3:, -1] A : Optional[Any] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) A : int = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 def __lowerCAmelCase ( self ) -> List[Any]: """simple docstring""" A : Dict = StableDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-lms-pipe''' , safety_checker=SCREAMING_SNAKE_CASE__ ) assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert isinstance(pipe.scheduler , SCREAMING_SNAKE_CASE__ ) assert pipe.safety_checker is None A : Tuple = pipe('''example prompt''' , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(SCREAMING_SNAKE_CASE__ ) A : List[str] = StableDiffusionPipeline.from_pretrained(SCREAMING_SNAKE_CASE__ ) # sanity check that the pipeline still works assert pipe.safety_checker is None A : 
Optional[int] = pipe('''example prompt''' , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def __lowerCAmelCase ( self ) -> Optional[Any]: """simple docstring""" A : Any = self.dummy_cond_unet A : List[str] = PNDMScheduler(skip_prk_steps=SCREAMING_SNAKE_CASE__ ) A : Dict = self.dummy_vae A : List[Any] = self.dummy_text_encoder A : Any = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # put models in fp16 A : Optional[int] = unet.half() A : Dict = vae.half() A : Optional[Any] = bert.half() # make sure here that pndm scheduler skips prk A : List[Any] = StableDiffusionPipeline( unet=SCREAMING_SNAKE_CASE__ , scheduler=SCREAMING_SNAKE_CASE__ , vae=SCREAMING_SNAKE_CASE__ , text_encoder=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ , safety_checker=SCREAMING_SNAKE_CASE__ , feature_extractor=self.dummy_extractor , ) A : int = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) A : str = '''A painting of a squirrel eating a burger''' A : Any = sd_pipe([prompt] , num_inference_steps=2 , output_type='''np''' ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class A ( unittest.TestCase ): def __lowerCAmelCase ( self ) -> Tuple: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowerCAmelCase ( self ) -> Any: """simple docstring""" A : Tuple = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=SCREAMING_SNAKE_CASE__ ) A : Optional[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) A : List[str] = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) A : str = ( '''portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle''' ''' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with''' ''' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and''' ''' children from bahnhof zoo, detailed ''' ) A : Union[str, Any] = 4003660346 A : Any = 7 # without safety guidance (sld_guidance_scale = 0) A : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) A : List[Any] = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , ) A : str = output.images A : Any = image[0, -3:, -3:, -1] A : Optional[Any] = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 # without safety guidance (strong configuration) A : List[Any] = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) A : List[Any] = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) A : Union[str, Any] = output.images A : Optional[Any] = image[0, -3:, -3:, -1] A : List[str] = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __lowerCAmelCase ( self ) -> str: 
"""simple docstring""" A : int = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=SCREAMING_SNAKE_CASE__ ) A : Dict = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) A : Dict = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) A : List[Any] = '''padme amidala taking a bath artwork, safe for work, no nudity''' A : List[str] = 2734971755 A : Union[str, Any] = 7 A : int = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) A : str = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , ) A : Dict = output.images A : str = image[0, -3:, -3:, -1] A : Dict = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 A : Dict = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) A : Dict = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) A : Any = output.images A : Union[str, Any] = image[0, -3:, -3:, -1] A : Tuple = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 def __lowerCAmelCase ( self ) -> Optional[int]: """simple docstring""" A : int = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' ) A : Tuple = sd_pipe.to(SCREAMING_SNAKE_CASE__ ) sd_pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) A : Optional[int] = ( '''the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.''' ''' leyendecker''' ) A : Dict = 1044355234 A : List[Any] = 12 A : Optional[int] = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) A : Dict = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , ) A : Optional[int] = output.images A : str = image[0, -3:, -3:, -1] A : List[str] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7 A : Tuple = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) A : int = sd_pipe( [prompt] , generator=SCREAMING_SNAKE_CASE__ , guidance_scale=SCREAMING_SNAKE_CASE__ , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.025 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) A : Union[str, Any] = output.images A : Dict = image[0, -3:, -3:, -1] A : Optional[Any] = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
3
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { "google/vivit-b-16x2-kinetics400": ( "https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json" ), # See all Vivit models at https://huggingface.co/models?filter=vivit } class _SCREAMING_SNAKE_CASE( A ): SCREAMING_SNAKE_CASE_ : List[str] = '''vivit''' def __init__( self ,SCREAMING_SNAKE_CASE__=2_24 ,SCREAMING_SNAKE_CASE__=32 ,SCREAMING_SNAKE_CASE__=[2, 16, 16] ,SCREAMING_SNAKE_CASE__=3 ,SCREAMING_SNAKE_CASE__=7_68 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=12 ,SCREAMING_SNAKE_CASE__=30_72 ,SCREAMING_SNAKE_CASE__="gelu_fast" ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0 ,SCREAMING_SNAKE_CASE__=0.0_2 ,SCREAMING_SNAKE_CASE__=1E-06 ,SCREAMING_SNAKE_CASE__=True ,**SCREAMING_SNAKE_CASE__ ,) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE :int = hidden_size __SCREAMING_SNAKE_CASE :List[Any] = num_hidden_layers __SCREAMING_SNAKE_CASE :Union[str, Any] = num_attention_heads __SCREAMING_SNAKE_CASE :Union[str, Any] = intermediate_size __SCREAMING_SNAKE_CASE :Any = hidden_act __SCREAMING_SNAKE_CASE :Optional[Any] = hidden_dropout_prob __SCREAMING_SNAKE_CASE :str = attention_probs_dropout_prob __SCREAMING_SNAKE_CASE :Any = initializer_range __SCREAMING_SNAKE_CASE :Optional[int] = layer_norm_eps __SCREAMING_SNAKE_CASE :Optional[int] = image_size __SCREAMING_SNAKE_CASE :List[str] = num_frames __SCREAMING_SNAKE_CASE :Any = tubelet_size __SCREAMING_SNAKE_CASE :str = num_channels __SCREAMING_SNAKE_CASE :Any = qkv_bias super().__init__(**SCREAMING_SNAKE_CASE__ )
191
0
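# Back-of-the-envelope check of what the tubelet_size default in VivitConfig
# above implies: ViViT embeds a video by slicing it into non-overlapping
# spatio-temporal tubelets, so a [2, 16, 16] tubelet over a 32-frame 224x224
# clip yields (32/2) * (224/16) * (224/16) tokens. The count below excludes
# any classification token the model prepends.
image_size, num_frames = 224, 32
tubelet_t, tubelet_h, tubelet_w = 2, 16, 16  # VivitConfig defaults above

num_patches = (num_frames // tubelet_t) * (image_size // tubelet_h) * (image_size // tubelet_w)
assert num_patches == 16 * 14 * 14 == 3136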
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}


class TimesformerConfig(PretrainedConfig):
    model_type = "timesformer"

    def __init__(
        self,
        image_size=224,
        patch_size=16,
        num_channels=3,
        num_frames=8,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        qkv_bias=True,
        attention_type="divided_space_time",
        drop_path_rate=0,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias

        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
353
import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel lowerCAmelCase_ = logging.getLogger(__name__) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> int: """simple docstring""" if os.path.exists(_UpperCamelCase ): if os.path.exists(os.path.join(_UpperCamelCase , '''config.json''' ) ) and os.path.isfile( os.path.join(_UpperCamelCase , '''config.json''' ) ): os.remove(os.path.join(_UpperCamelCase , '''config.json''' ) ) if os.path.exists(os.path.join(_UpperCamelCase , '''pytorch_model.bin''' ) ) and os.path.isfile( os.path.join(_UpperCamelCase , '''pytorch_model.bin''' ) ): os.remove(os.path.join(_UpperCamelCase , '''pytorch_model.bin''' ) ) else: os.makedirs(_UpperCamelCase ) model.save_pretrained(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase=False ) -> Optional[int]: """simple docstring""" snake_case_ : List[Any] = 2 if unlogit: snake_case_ : Any = torch.pow(_UpperCamelCase , _UpperCamelCase ) snake_case_ : Optional[Any] = p * torch.log(_UpperCamelCase ) snake_case_ : Dict = 0 return -plogp.sum(dim=-1 ) def lowerCamelCase_ ( _UpperCamelCase ) -> int: """simple docstring""" logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(_UpperCamelCase ) ) ) ) for row in range(len(_UpperCamelCase ) ): if tensor.dtype != torch.long: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=False ) -> Union[str, Any]: """simple docstring""" snake_case_ , snake_case_ : int = model.config.num_hidden_layers, model.config.num_attention_heads snake_case_ : int = torch.zeros(_UpperCamelCase , _UpperCamelCase ).to(args.device ) snake_case_ : Optional[int] = torch.zeros(_UpperCamelCase , _UpperCamelCase ).to(args.device ) if head_mask is None: snake_case_ : Tuple = torch.ones(_UpperCamelCase , _UpperCamelCase ).to(args.device ) head_mask.requires_grad_(requires_grad=_UpperCamelCase ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: snake_case_ : Dict = None snake_case_ : Tuple = 0.0 snake_case_ : Dict = 0.0 for step, inputs in enumerate(tqdm(_UpperCamelCase , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ): snake_case_ : Any = tuple(t.to(args.device ) for t in inputs ) ((snake_case_) , ) : Union[str, Any] = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) snake_case_ : List[str] = model(_UpperCamelCase , labels=_UpperCamelCase , head_mask=_UpperCamelCase ) # (loss), lm_logits, presents, (all hidden_states), (attentions) snake_case_ , snake_case_ , snake_case_ : int = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(_UpperCamelCase ): snake_case_ : Dict = entropy(attn.detach() , _UpperCamelCase ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() if compute_importance: 
head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(_UpperCamelCase ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: snake_case_ : Union[str, Any] = 2 snake_case_ : Any = torch.pow(torch.pow(_UpperCamelCase , _UpperCamelCase ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20 if not args.dont_normalize_global_importance: snake_case_ : Union[str, Any] = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('''Attention entropies''' ) print_ad_tensor(_UpperCamelCase ) if compute_importance: logger.info('''Head importance scores''' ) print_ad_tensor(_UpperCamelCase ) logger.info('''Head ranked by importance scores''' ) snake_case_ : Optional[int] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) snake_case_ : Union[str, Any] = torch.arange( head_importance.numel() , device=args.device ) snake_case_ : Dict = head_ranks.view_as(_UpperCamelCase ) print_ad_tensor(_UpperCamelCase ) return attn_entropy, head_importance, total_loss def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]: """simple docstring""" snake_case_ , snake_case_ , snake_case_ : Optional[int] = compute_heads_importance(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , compute_entropy=_UpperCamelCase ) snake_case_ : Any = 1 / loss # instead of downsteam score use the LM loss logger.info('''Pruning: original score: %f, threshold: %f''' , _UpperCamelCase , original_score * args.masking_threshold ) snake_case_ : Any = torch.ones_like(_UpperCamelCase ) snake_case_ : Union[str, Any] = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) snake_case_ : List[Any] = original_score while current_score >= original_score * args.masking_threshold: snake_case_ : List[str] = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads snake_case_ : Optional[Any] = float('''Inf''' ) snake_case_ : List[Any] = head_importance.view(-1 ).sort()[1] if len(_UpperCamelCase ) <= num_to_mask: print('''BREAK BY num_to_mask''' ) break # mask heads snake_case_ : Optional[int] = current_heads_to_mask[:num_to_mask] logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) ) snake_case_ : Optional[Any] = new_head_mask.view(-1 ) snake_case_ : int = 0.0 snake_case_ : List[Any] = new_head_mask.view_as(_UpperCamelCase ) snake_case_ : List[str] = new_head_mask.clone().detach() print_ad_tensor(_UpperCamelCase ) # Compute metric and head importance again snake_case_ , snake_case_ , snake_case_ : str = compute_heads_importance( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , compute_entropy=_UpperCamelCase , head_mask=_UpperCamelCase ) snake_case_ : Tuple = 1 / loss logger.info( '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , _UpperCamelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info('''Final head mask''' ) print_ad_tensor(_UpperCamelCase ) np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() ) return head_mask def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> str: """simple docstring""" snake_case_ : str = datetime.now() snake_case_ , snake_case_ , snake_case_ : List[Any] = 
compute_heads_importance( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , compute_entropy=_UpperCamelCase , compute_importance=_UpperCamelCase , head_mask=_UpperCamelCase ) snake_case_ : Union[str, Any] = 1 / loss snake_case_ : Union[str, Any] = datetime.now() - before_time snake_case_ : int = sum(p.numel() for p in model.parameters() ) snake_case_ : Tuple = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_UpperCamelCase ) ) } for k, v in heads_to_prune.items(): if isinstance(_UpperCamelCase , _UpperCamelCase ): snake_case_ : Any = [ v, ] assert sum(len(_UpperCamelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(_UpperCamelCase ) snake_case_ : Union[str, Any] = sum(p.numel() for p in model.parameters() ) snake_case_ : Dict = datetime.now() snake_case_ , snake_case_ , snake_case_ : Union[str, Any] = compute_heads_importance( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , compute_entropy=_UpperCamelCase , compute_importance=_UpperCamelCase , head_mask=_UpperCamelCase , actually_pruned=_UpperCamelCase , ) snake_case_ : Union[str, Any] = 1 / loss snake_case_ : Optional[Any] = datetime.now() - before_time logger.info( '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , _UpperCamelCase , _UpperCamelCase , pruned_num_params / original_num_params * 100 , ) logger.info('''Pruning: score with masking: %f score with pruning: %f''' , _UpperCamelCase , _UpperCamelCase ) logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 ) save_model(_UpperCamelCase , args.output_dir ) def lowerCamelCase_ ( ) -> Optional[int]: """simple docstring""" snake_case_ : Tuple = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--data_dir''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''The input data dir. 
Should contain the .tsv files (or other data files) for the task.''' , ) parser.add_argument( '''--model_name_or_path''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--output_dir''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , ) # Other parameters parser.add_argument( '''--config_name''' , default='''''' , type=_UpperCamelCase , help='''Pretrained config name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--tokenizer_name''' , default='''''' , type=_UpperCamelCase , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--cache_dir''' , default=_UpperCamelCase , type=_UpperCamelCase , help='''Where do you want to store the pre-trained models downloaded from s3''' , ) parser.add_argument( '''--data_subset''' , type=_UpperCamelCase , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' ) parser.add_argument( '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) parser.add_argument( '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' ) parser.add_argument( '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , ) parser.add_argument( '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' ) parser.add_argument( '''--masking_threshold''' , default=0.9 , type=_UpperCamelCase , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , ) parser.add_argument( '''--masking_amount''' , default=0.1 , type=_UpperCamelCase , help='''Amount to heads to masking at each masking step.''' ) parser.add_argument('''--metric_name''' , default='''acc''' , type=_UpperCamelCase , help='''Metric to use for head masking.''' ) parser.add_argument( '''--max_seq_length''' , default=128 , type=_UpperCamelCase , help=( '''The maximum total input sequence length after WordPiece tokenization. 
\n''' '''Sequences longer than this will be truncated, sequences shorter padded.''' ) , ) parser.add_argument('''--batch_size''' , default=1 , type=_UpperCamelCase , help='''Batch size.''' ) parser.add_argument('''--seed''' , type=_UpperCamelCase , default=42 ) parser.add_argument('''--local_rank''' , type=_UpperCamelCase , default=-1 , help='''local_rank for distributed training on gpus''' ) parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' ) parser.add_argument('''--server_ip''' , type=_UpperCamelCase , default='''''' , help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''' , type=_UpperCamelCase , default='''''' , help='''Can be used for distant debugging.''' ) snake_case_ : Any = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_UpperCamelCase ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: snake_case_ : Tuple = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' ) snake_case_ : Tuple = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) snake_case_ : List[str] = torch.device('''cuda''' , args.local_rank ) snake_case_ : Union[str, Any] = 1 torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) snake_case_ : int = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: snake_case_ : Any = nn.parallel.DistributedDataParallel( _UpperCamelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_UpperCamelCase ) elif args.n_gpu > 1: snake_case_ : Dict = nn.DataParallel(_UpperCamelCase ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=_UpperCamelCase ) torch.save(_UpperCamelCase , os.path.join(args.output_dir , '''run_args.bin''' ) ) logger.info('''Training/evaluation parameters %s''' , _UpperCamelCase ) # Prepare dataset snake_case_ : str = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) snake_case_ : Any = (torch.from_numpy(_UpperCamelCase ),) snake_case_ : Any = TensorDataset(*_UpperCamelCase ) snake_case_ : List[str] = RandomSampler(_UpperCamelCase ) snake_case_ : int = DataLoader(_UpperCamelCase , sampler=_UpperCamelCase , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: snake_case_ : List[str] = mask_heads(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) prune_heads(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) if __name__ == "__main__": main()
279
0
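# Minimal self-contained version of the entropy() helper that the head-pruning
# script above uses to score attention heads (omitting the script's optional
# unlogit squaring): Shannon entropy -sum(p * log p) over the last axis, with
# the usual 0 * log 0 = 0 convention.
import math

import torch


def entropy(p: torch.Tensor) -> torch.Tensor:
    plogp = p * torch.log(p)
    plogp[p == 0] = 0  # lim_{x -> 0} x * log(x) = 0, so mask out the NaNs
    return -plogp.sum(dim=-1)


attn = torch.tensor(
    [
        [0.25, 0.25, 0.25, 0.25],  # maximally diffuse head -> entropy log(4)
        [1.00, 0.00, 0.00, 0.00],  # maximally focused head -> entropy 0
    ]
)
assert torch.allclose(entropy(attn), torch.tensor([math.log(4.0), 0.0]))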
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ASTConfig from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_torchaudio_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ASTForAudioClassification, ASTModel from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import ( AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) if is_torchaudio_available(): import torchaudio from transformers import ASTFeatureExtractor class __lowerCAmelCase : def __init__( self :List[Any] , __magic_name__ :Union[str, Any] , __magic_name__ :Any=13 , __magic_name__ :Optional[Any]=2 , __magic_name__ :Dict=24 , __magic_name__ :Any=16 , __magic_name__ :List[str]=True , __magic_name__ :Tuple=True , __magic_name__ :Tuple=32 , __magic_name__ :int=5 , __magic_name__ :List[str]=4 , __magic_name__ :int=37 , __magic_name__ :Optional[int]="gelu" , __magic_name__ :Optional[int]=0.1 , __magic_name__ :List[Any]=0.1 , __magic_name__ :Union[str, Any]=10 , __magic_name__ :List[Any]=0.02 , __magic_name__ :Optional[int]=None , __magic_name__ :Optional[int]=2 , __magic_name__ :Dict=2 , ): '''simple docstring''' a = parent a = batch_size a = patch_size a = max_length a = num_mel_bins a = is_training a = use_labels a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = type_sequence_label_size a = initializer_range a = scope a = frequency_stride a = time_stride # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) a = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1 a = (self.max_length - self.patch_size) // self.time_stride + 1 a = frequency_out_dimension * time_out_dimension a = num_patches + 2 def lowerCamelCase__ ( self :str ): '''simple docstring''' a = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] ) a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = self.get_config() return config, input_values, labels def lowerCamelCase__ ( self :Optional[Any] ): '''simple docstring''' return ASTConfig( patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__magic_name__ , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , ) def lowerCamelCase__ ( self :Tuple , __magic_name__ :Tuple , __magic_name__ :Optional[int] , __magic_name__ :Tuple ): '''simple docstring''' a = ASTModel(config=__magic_name__ ) model.to(__magic_name__ ) model.eval() a = model(__magic_name__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a = 
self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ) = config_and_inputs a = {"""input_values""": input_values} return config, inputs_dict @require_torch class __lowerCAmelCase ( __magic_name__ , __magic_name__ , unittest.TestCase ): UpperCamelCase__ = ( ( ASTModel, ASTForAudioClassification, ) if is_torch_available() else () ) UpperCamelCase__ = ( {'''audio-classification''': ASTForAudioClassification, '''feature-extraction''': ASTModel} if is_torch_available() else {} ) UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False UpperCamelCase__ = False def lowerCamelCase__ ( self :Dict , __magic_name__ :Tuple , __magic_name__ :int , __magic_name__ :Dict , __magic_name__ :Union[str, Any] , __magic_name__ :Tuple ): '''simple docstring''' if pipeline_test_casse_name == "AudioClassificationPipelineTests": return True return False def lowerCamelCase__ ( self :Tuple ): '''simple docstring''' a = ASTModelTester(self ) a = ConfigTester(self , config_class=__magic_name__ , has_text_modality=__magic_name__ , hidden_size=37 ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="""AST does not use inputs_embeds""" ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' pass def lowerCamelCase__ ( self :Optional[int] ): '''simple docstring''' a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__magic_name__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) a = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__magic_name__ , nn.Linear ) ) def lowerCamelCase__ ( self :List[Any] ): '''simple docstring''' a , a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: a = model_class(__magic_name__ ) a = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic a = [*signature.parameters.keys()] a = ["""input_values"""] self.assertListEqual(arg_names[:1] , __magic_name__ ) def lowerCamelCase__ ( self :str ): '''simple docstring''' a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__magic_name__ ) @slow def lowerCamelCase__ ( self :Union[str, Any] ): '''simple docstring''' for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = ASTModel.from_pretrained(__magic_name__ ) self.assertIsNotNone(__magic_name__ ) def __A ( ) -> Union[str, Any]: a = hf_hub_download( repo_id="""nielsr/audio-spectogram-transformer-checkpoint""" , filename="""sample_audio.flac""" , repo_type="""dataset""" ) a , a = torchaudio.load(__lowerCamelCase ) return audio, sampling_rate @require_torch @require_torchaudio class __lowerCAmelCase ( unittest.TestCase ): @cached_property def lowerCamelCase__ ( self :Dict ): '''simple docstring''' return ( ASTFeatureExtractor.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" ) if is_torchaudio_available() else None ) @slow def lowerCamelCase__ ( self :int ): '''simple docstring''' a = self.default_feature_extractor a = ASTForAudioClassification.from_pretrained("""MIT/ast-finetuned-audioset-10-10-0.4593""" ).to(__magic_name__ ) a = self.default_feature_extractor a , a = prepare_audio() a = audio.squeeze().numpy() a = feature_extractor(__magic_name__ , sampling_rate=__magic_name__ , return_tensors="""pt""" ).to(__magic_name__ ) # forward pass with torch.no_grad(): a = model(**__magic_name__ ) # verify 
the logits a = torch.Size((1, 527) ) self.assertEqual(outputs.logits.shape , __magic_name__ ) a = torch.tensor([-0.8760, -7.0042, -8.6602] ).to(__magic_name__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __magic_name__ , atol=1E-4 ) )
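As a companion to the integration test above, a minimal sketch of exercising the same AST checkpoint directly; the silent one-second placeholder waveform and the 16 kHz rate are illustration-only assumptions, not part of the original test:

# Hedged sketch of AST audio classification outside the test harness; assumes
# the public MIT/ast-finetuned-audioset-10-10-0.4593 checkpoint and a mono
# 16 kHz waveform (here: one second of silence as a placeholder input).
import numpy as np
import torch
from transformers import ASTFeatureExtractor, ASTForAudioClassification

extractor = ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")

waveform = np.zeros(16_000, dtype=np.float32)  # placeholder waveform
inputs = extractor(waveform, sampling_rate=16_000, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 527): AudioSet classes

print(model.config.id2label[int(logits.argmax(-1))])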
228
def fizz_buzz(number: int, iterations: int) -> str:
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)
        # print(out)
        number += 1
        out += " "
    return out


if __name__ == "__main__":
    import doctest

    doctest.testmod()
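The module enables `doctest.testmod()` but defines no doctests; a hedged sanity check of the expected output (note the trailing space the loop appends after every token):

# Hedged sanity checks for fizz_buzz above; the expected strings assume the
# trailing-space behavior of the loop is intentional.
assert fizz_buzz(1, 7) == "1 2 Fizz 4 Buzz Fizz 7 "
assert fizz_buzz(1, 15).split()[-1] == "FizzBuzz"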
228
1
"""simple docstring""" import argparse import os import jax as jnp import numpy as onp import torch import torch.nn as nn from music_spectrogram_diffusion import inference from tax import checkpoints from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder __lowercase = '''base_with_context''' def lowerCAmelCase (__UpperCamelCase : Tuple , __UpperCamelCase : List[str] ): """simple docstring""" __UpperCamelCase =nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) ) __UpperCamelCase =nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=__UpperCamelCase ) for lyr_num, lyr in enumerate(model.encoders ): __UpperCamelCase =weights[F"""layers_{lyr_num}"""] __UpperCamelCase =nn.Parameter( torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) ) __UpperCamelCase =ly_weight['''attention'''] __UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) ) return model def lowerCAmelCase (__UpperCamelCase : str , __UpperCamelCase : Dict ): """simple docstring""" __UpperCamelCase =nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=__UpperCamelCase ) for lyr_num, lyr in enumerate(model.encoders ): __UpperCamelCase =weights[F"""layers_{lyr_num}"""] __UpperCamelCase =ly_weight['''attention'''] __UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter( torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) ) return model def lowerCAmelCase (__UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] ): """simple docstring""" __UpperCamelCase 
=nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter( torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=__UpperCamelCase ) __UpperCamelCase =nn.Parameter( torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) ) for lyr_num, lyr in enumerate(model.decoders ): __UpperCamelCase =weights[F"""layers_{lyr_num}"""] __UpperCamelCase =nn.Parameter( torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) ) __UpperCamelCase =nn.Parameter( torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) ) __UpperCamelCase =ly_weight['''self_attention'''] __UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) __UpperCamelCase =ly_weight['''MultiHeadDotProductAttention_0'''] __UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter( torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) ) __UpperCamelCase =nn.Parameter( torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) ) __UpperCamelCase =nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) ) return model def lowerCAmelCase (__UpperCamelCase : Union[str, Any] ): """simple docstring""" __UpperCamelCase =checkpoints.load_tax_checkpoint(args.checkpoint_path ) __UpperCamelCase =jnp.tree_util.tree_map(onp.array , __UpperCamelCase ) __UpperCamelCase =[ '''from __gin__ import dynamic_registration''', '''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''', '''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''', '''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''', ] __UpperCamelCase =os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' ) __UpperCamelCase =inference.parse_training_gin_file(__UpperCamelCase , __UpperCamelCase ) __UpperCamelCase =inference.InferenceModel(args.checkpoint_path , __UpperCamelCase ) __UpperCamelCase =DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' ) __UpperCamelCase =SpectrogramNotesEncoder( max_length=synth_model.sequence_length['''inputs'''] , 
vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , ) __UpperCamelCase =SpectrogramContEncoder( input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , ) __UpperCamelCase =TaFilmDecoder( input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , ) __UpperCamelCase =load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , __UpperCamelCase ) __UpperCamelCase =load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , __UpperCamelCase ) __UpperCamelCase =load_decoder(ta_checkpoint['''target''']['''decoder'''] , __UpperCamelCase ) __UpperCamelCase =OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' ) __UpperCamelCase =SpectrogramDiffusionPipeline( notes_encoder=__UpperCamelCase , continuous_encoder=__UpperCamelCase , decoder=__UpperCamelCase , scheduler=__UpperCamelCase , melgan=__UpperCamelCase , ) if args.save: pipe.save_pretrained(args.output_path ) if __name__ == "__main__": __lowercase = argparse.ArgumentParser() parser.add_argument('''--output_path''', default=None, type=str, required=True, help='''Path to the converted model.''') parser.add_argument( '''--save''', default=True, type=bool, required=False, help='''Whether to save the converted model or not.''' ) parser.add_argument( '''--checkpoint_path''', default=f'''{MODEL}/checkpoint_500000''', type=str, required=False, help='''Path to the original jax model checkpoint.''', ) __lowercase = parser.parse_args() main(args)
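A hedged sketch of reloading what the script saves; "converted_model" is a placeholder for the directory passed as `--output_path`, not a real repository:

# Hedged reload sketch; "converted_model" is a placeholder path.
from diffusers import SpectrogramDiffusionPipeline

pipe = SpectrogramDiffusionPipeline.from_pretrained("converted_model")
print(type(pipe.notes_encoder).__name__, type(pipe.decoder).__name__)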
85
"""simple docstring""" from __future__ import annotations from collections.abc import Iterator from typing import Any class _lowercase : """simple docstring""" def __init__( self : int , UpperCamelCase__ : Any ) -> Optional[Any]: '''simple docstring''' __UpperCamelCase =data __UpperCamelCase =None class _lowercase : """simple docstring""" def __init__( self : Union[str, Any] ) -> List[Any]: '''simple docstring''' __UpperCamelCase =None __UpperCamelCase =None def __iter__( self : int ) -> Iterator[Any]: '''simple docstring''' __UpperCamelCase =self.head while self.head: yield node.data __UpperCamelCase =node.next if node == self.head: break def __len__( self : Union[str, Any] ) -> int: '''simple docstring''' return sum(1 for _ in self ) def __repr__( self : str ) -> Union[str, Any]: '''simple docstring''' return "->".join(str(UpperCamelCase__ ) for item in iter(self ) ) def UpperCAmelCase_ ( self : List[str] , UpperCamelCase__ : Any ) -> None: '''simple docstring''' self.insert_nth(len(self ) , UpperCamelCase__ ) def UpperCAmelCase_ ( self : Optional[int] , UpperCamelCase__ : Any ) -> None: '''simple docstring''' self.insert_nth(0 , UpperCamelCase__ ) def UpperCAmelCase_ ( self : List[str] , UpperCamelCase__ : int , UpperCamelCase__ : Any ) -> None: '''simple docstring''' if index < 0 or index > len(self ): raise IndexError('''list index out of range.''' ) __UpperCamelCase =Node(UpperCamelCase__ ) if self.head is None: __UpperCamelCase =new_node # first node points itself __UpperCamelCase =__UpperCamelCase =new_node elif index == 0: # insert at head __UpperCamelCase =self.head __UpperCamelCase =__UpperCamelCase =new_node else: __UpperCamelCase =self.head for _ in range(index - 1 ): __UpperCamelCase =temp.next __UpperCamelCase =temp.next __UpperCamelCase =new_node if index == len(self ) - 1: # insert at tail __UpperCamelCase =new_node def UpperCAmelCase_ ( self : Any ) -> Any: '''simple docstring''' return self.delete_nth(0 ) def UpperCAmelCase_ ( self : Optional[int] ) -> Any: '''simple docstring''' return self.delete_nth(len(self ) - 1 ) def UpperCAmelCase_ ( self : int , UpperCamelCase__ : int = 0 ) -> Any: '''simple docstring''' if not 0 <= index < len(self ): raise IndexError('''list index out of range.''' ) __UpperCamelCase =self.head if self.head == self.tail: # just one node __UpperCamelCase =__UpperCamelCase =None elif index == 0: # delete head node __UpperCamelCase =self.tail.next.next __UpperCamelCase =self.head.next else: __UpperCamelCase =self.head for _ in range(index - 1 ): __UpperCamelCase =temp.next __UpperCamelCase =temp.next __UpperCamelCase =temp.next.next if index == len(self ) - 1: # delete at tail __UpperCamelCase =temp return delete_node.data def UpperCAmelCase_ ( self : str ) -> bool: '''simple docstring''' return len(self ) == 0 def lowerCAmelCase (): """simple docstring""" __UpperCamelCase =CircularLinkedList() assert len(__UpperCamelCase ) == 0 assert circular_linked_list.is_empty() is True assert str(__UpperCamelCase ) == "" try: circular_linked_list.delete_front() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_tail() raise AssertionError # This should not happen except IndexError: assert True # This should happen try: circular_linked_list.delete_nth(-1 ) raise AssertionError except IndexError: assert True try: circular_linked_list.delete_nth(0 ) raise AssertionError except IndexError: assert True assert circular_linked_list.is_empty() is True for i in range(5 ): assert 
len(__UpperCamelCase ) == i circular_linked_list.insert_nth(__UpperCamelCase , i + 1 ) assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 , 6 ) ) circular_linked_list.insert_tail(6 ) assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 , 7 ) ) circular_linked_list.insert_head(0 ) assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(0 , 7 ) ) assert circular_linked_list.delete_front() == 0 assert circular_linked_list.delete_tail() == 6 assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 , 6 ) ) assert circular_linked_list.delete_nth(2 ) == 3 circular_linked_list.insert_nth(2 , 3 ) assert str(__UpperCamelCase ) == "->".join(str(__UpperCamelCase ) for i in range(1 , 6 ) ) assert circular_linked_list.is_empty() is False if __name__ == "__main__": import doctest doctest.testmod()
85
1
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) a__ = { '''configuration_xlm_roberta''': [ '''XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMRobertaConfig''', '''XLMRobertaOnnxConfig''', ], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ = ['''XLMRobertaTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ = ['''XLMRobertaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ = [ '''XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMRobertaForCausalLM''', '''XLMRobertaForMaskedLM''', '''XLMRobertaForMultipleChoice''', '''XLMRobertaForQuestionAnswering''', '''XLMRobertaForSequenceClassification''', '''XLMRobertaForTokenClassification''', '''XLMRobertaModel''', '''XLMRobertaPreTrainedModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ = [ '''TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXLMRobertaForCausalLM''', '''TFXLMRobertaForMaskedLM''', '''TFXLMRobertaForMultipleChoice''', '''TFXLMRobertaForQuestionAnswering''', '''TFXLMRobertaForSequenceClassification''', '''TFXLMRobertaForTokenClassification''', '''TFXLMRobertaModel''', '''TFXLMRobertaPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: a__ = [ '''FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FlaxXLMRobertaForMaskedLM''', '''FlaxXLMRobertaForCausalLM''', '''FlaxXLMRobertaForMultipleChoice''', '''FlaxXLMRobertaForQuestionAnswering''', '''FlaxXLMRobertaForSequenceClassification''', '''FlaxXLMRobertaForTokenClassification''', '''FlaxXLMRobertaModel''', '''FlaxXLMRobertaPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig, XLMRobertaOnnxConfig, ) try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta import XLMRobertaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, XLMRobertaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForCausalLM, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, TFXLMRobertaModel, TFXLMRobertaPreTrainedModel, ) try: if not is_flax_available(): raise 
OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_xlm_roberta import ( FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxXLMRobertaForCausalLM, FlaxXLMRobertaForMaskedLM, FlaxXLMRobertaForMultipleChoice, FlaxXLMRobertaForQuestionAnswering, FlaxXLMRobertaForSequenceClassification, FlaxXLMRobertaForTokenClassification, FlaxXLMRobertaModel, FlaxXLMRobertaPreTrainedModel, ) else: import sys a__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
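The file above is pure import plumbing built on transformers' `_LazyModule`; a minimal standalone sketch of the same defer-until-first-access idea (the class and module names here are illustrative, not transformers internals):

# Minimal sketch of the lazy-import pattern; LazyModule and "lazy_math"
# are illustrative names, not part of the original module.
import importlib
import types


class LazyModule(types.ModuleType):
    """Defers the real import until an attribute is first accessed."""

    def __init__(self, name: str, target: str):
        super().__init__(name)
        self._target = target  # dotted path of the module to import lazily
        self._module = None

    def __getattr__(self, attr):
        if self._module is None:
            self._module = importlib.import_module(self._target)
        return getattr(self._module, attr)


# "math" is only imported when .sqrt is first touched.
lazy_math = LazyModule("lazy_math", "math")
print(lazy_math.sqrt(16.0))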
235
import math from typing import Dict, Iterable, List, Optional, Tuple, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format from ...image_utils import ( IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD, ChannelDimension, ImageInput, PILImageResampling, get_image_size, is_torch_available, is_torch_tensor, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging if is_torch_available(): import torch if is_vision_available(): import PIL a__ = logging.get_logger(__name__) def __UpperCAmelCase ( __a : np.ndarray ,__a : Union[int, Iterable[int]] ,__a : bool ,__a : int ) -> Tuple[int, int]: """simple docstring""" def constraint_to_multiple_of(__a : List[str] ,__a : Dict ,__a : Any=0 ,__a : int=None ): _a : Dict = round(val / multiple ) * multiple if max_val is not None and x > max_val: _a : Any = math.floor(val / multiple ) * multiple if x < min_val: _a : Dict = math.ceil(val / multiple ) * multiple return x _a : Union[str, Any] = (output_size, output_size) if isinstance(__a ,__a ) else output_size _a , _a : List[Any] = get_image_size(__a ) _a , _a : Any = output_size # determine new height and width _a : Union[str, Any] = output_height / input_height _a : Tuple = output_width / input_width if keep_aspect_ratio: # scale as little as possible if abs(1 - scale_width ) < abs(1 - scale_height ): # fit width _a : Optional[Any] = scale_width else: # fit height _a : Tuple = scale_height _a : Optional[Any] = constraint_to_multiple_of(scale_height * input_height ,multiple=__a ) _a : int = constraint_to_multiple_of(scale_width * input_width ,multiple=__a ) return (new_height, new_width) class UpperCAmelCase_ ( __lowercase ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = ["pixel_values"] def __init__( self , _a = True , _a = None , _a = PILImageResampling.BILINEAR , _a = False , _a = 1 , _a = True , _a = 1 / 2_5_5 , _a = True , _a = None , _a = None , **_a , ) -> None: super().__init__(**_a ) _a : Optional[int] = size if size is not None else {'''height''': 3_8_4, '''width''': 3_8_4} _a : Optional[Any] = get_size_dict(_a ) _a : Any = do_resize _a : Dict = size _a : str = keep_aspect_ratio _a : Any = ensure_multiple_of _a : Optional[Any] = resample _a : List[Any] = do_rescale _a : int = rescale_factor _a : Any = do_normalize _a : Union[str, Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN _a : List[str] = image_std if image_std is not None else IMAGENET_STANDARD_STD def __lowercase ( self , _a , _a , _a = False , _a = 1 , _a = PILImageResampling.BICUBIC , _a = None , **_a , ) -> np.ndarray: _a : str = get_size_dict(_a ) if "height" not in size or "width" not in size: raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. 
Got {size.keys()}""" ) _a : Optional[Any] = get_resize_output_image_size( _a , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_a , multiple=_a , ) return resize(_a , size=_a , resample=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a , _a = None , **_a , ) -> int: return rescale(_a , scale=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a , _a , _a = None , **_a , ) -> np.ndarray: return normalize(_a , mean=_a , std=_a , data_format=_a , **_a ) def __lowercase ( self , _a , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = None , _a = ChannelDimension.FIRST , **_a , ) -> PIL.Image.Image: _a : Optional[int] = do_resize if do_resize is not None else self.do_resize _a : Union[str, Any] = size if size is not None else self.size _a : str = get_size_dict(_a ) _a : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio _a : Optional[Any] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of _a : str = resample if resample is not None else self.resample _a : str = do_rescale if do_rescale is not None else self.do_rescale _a : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor _a : List[Any] = do_normalize if do_normalize is not None else self.do_normalize _a : str = image_mean if image_mean is not None else self.image_mean _a : Tuple = image_std if image_std is not None else self.image_std _a : Dict = make_list_of_images(_a ) if not valid_images(_a ): raise ValueError( '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ''' '''torch.Tensor, tf.Tensor or jax.ndarray.''' ) if do_resize and size is None or resample is None: raise ValueError('''Size and resample must be specified if do_resize is True.''' ) if do_rescale and rescale_factor is None: raise ValueError('''Rescale factor must be specified if do_rescale is True.''' ) if do_normalize and (image_mean is None or image_std is None): raise ValueError('''Image mean and std must be specified if do_normalize is True.''' ) # All transformations expect numpy arrays. _a : Dict = [to_numpy_array(_a ) for image in images] if do_resize: _a : int = [self.resize(image=_a , size=_a , resample=_a ) for image in images] if do_rescale: _a : Optional[Any] = [self.rescale(image=_a , scale=_a ) for image in images] if do_normalize: _a : Optional[int] = [self.normalize(image=_a , mean=_a , std=_a ) for image in images] _a : int = [to_channel_dimension_format(_a , _a ) for image in images] _a : Tuple = {'''pixel_values''': images} return BatchFeature(data=_a , tensor_type=_a ) def __lowercase ( self , _a , _a = None ) -> Any: _a : Optional[Any] = outputs.logits # Resize logits and compute semantic segmentation maps if target_sizes is not None: if len(_a ) != len(_a ): raise ValueError( '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' ) if is_torch_tensor(_a ): _a : List[Any] = target_sizes.numpy() _a : str = [] for idx in range(len(_a ) ): _a : str = torch.nn.functional.interpolate( logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_a ) _a : Union[str, Any] = resized_logits[0].argmax(dim=0 ) semantic_segmentation.append(_a ) else: _a : Tuple = logits.argmax(dim=1 ) _a : Union[str, Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )] return semantic_segmentation
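A hedged usage sketch for an image processor of this shape; the `Intel/dpt-large` checkpoint and the local file name are demo assumptions:

# Hedged usage sketch; Intel/dpt-large and example.jpg are demo assumptions.
from PIL import Image
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained("Intel/dpt-large")
image = Image.open("example.jpg").convert("RGB")
inputs = processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # roughly (1, 3, 384, 384), subject to multiple-of rounding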
235
1
import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowercase : '''simple docstring''' def __init__(self , __a , __a=13 , __a=32 , __a=2 , __a=3 , __a=16 , __a=[1, 2, 1] , __a=[2, 2, 4] , __a=2 , __a=2.0 , __a=True , __a=0.0 , __a=0.0 , __a=0.1 , __a="gelu" , __a=False , __a=True , __a=0.02 , __a=1E-5 , __a=True , __a=None , __a=True , __a=10 , __a=8 , ) -> str: """simple docstring""" UpperCAmelCase__ = parent UpperCAmelCase__ = batch_size UpperCAmelCase__ = image_size UpperCAmelCase__ = patch_size UpperCAmelCase__ = num_channels UpperCAmelCase__ = embed_dim UpperCAmelCase__ = depths UpperCAmelCase__ = num_heads UpperCAmelCase__ = window_size UpperCAmelCase__ = mlp_ratio UpperCAmelCase__ = qkv_bias UpperCAmelCase__ = hidden_dropout_prob UpperCAmelCase__ = attention_probs_dropout_prob UpperCAmelCase__ = drop_path_rate UpperCAmelCase__ = hidden_act UpperCAmelCase__ = use_absolute_embeddings UpperCAmelCase__ = patch_norm UpperCAmelCase__ = layer_norm_eps UpperCAmelCase__ = initializer_range UpperCAmelCase__ = is_training UpperCAmelCase__ = scope UpperCAmelCase__ = use_labels UpperCAmelCase__ = type_sequence_label_size UpperCAmelCase__ = encoder_stride def UpperCamelCase__ (self ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ = None if self.use_labels: UpperCAmelCase__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ = self.get_config() return config, pixel_values, labels def UpperCamelCase__ (self ) -> str: """simple docstring""" return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def UpperCamelCase__ (self , __a , __a , __a ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ = SwinvaModel(config=__a ) model.to(__a ) model.eval() UpperCAmelCase__ = model(__a ) UpperCAmelCase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) UpperCAmelCase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def UpperCamelCase__ (self , __a , __a , __a ) -> Any: 
"""simple docstring""" UpperCAmelCase__ = SwinvaForMaskedImageModeling(config=__a ) model.to(__a ) model.eval() UpperCAmelCase__ = model(__a ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images UpperCAmelCase__ = 1 UpperCAmelCase__ = SwinvaForMaskedImageModeling(__a ) model.to(__a ) model.eval() UpperCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase__ = model(__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def UpperCamelCase__ (self , __a , __a , __a ) -> Dict: """simple docstring""" UpperCAmelCase__ = self.type_sequence_label_size UpperCAmelCase__ = SwinvaForImageClassification(__a ) model.to(__a ) model.eval() UpperCAmelCase__ = model(__a , labels=__a ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase__ (self ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase__ = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = config_and_inputs UpperCAmelCase__ = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class lowercase ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ): '''simple docstring''' __SCREAMING_SNAKE_CASE = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) __SCREAMING_SNAKE_CASE = ( {"""feature-extraction""": SwinvaModel, """image-classification""": SwinvaForImageClassification} if is_torch_available() else {} ) __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False __SCREAMING_SNAKE_CASE = False def UpperCamelCase__ (self ) -> Any: """simple docstring""" UpperCAmelCase__ = SwinvaModelTester(self ) UpperCAmelCase__ = ConfigTester(self , config_class=__a , embed_dim=37 ) def UpperCamelCase__ (self ) -> Any: """simple docstring""" self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase__ (self ) -> List[str]: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__a ) @unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' 
) def UpperCamelCase__ (self ) -> int: """simple docstring""" pass @unittest.skip(reason='Swinv2 does not use inputs_embeds' ) def UpperCamelCase__ (self ) -> Union[str, Any]: """simple docstring""" pass def UpperCamelCase__ (self ) -> int: """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ = model_class(__a ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(__a , nn.Linear ) ) def UpperCamelCase__ (self ) -> int: """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ = model_class(__a ) UpperCAmelCase__ = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ = [*signature.parameters.keys()] UpperCAmelCase__ = ['pixel_values'] self.assertListEqual(arg_names[:1] , __a ) def UpperCamelCase__ (self ) -> int: """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ = True for model_class in self.all_model_classes: UpperCAmelCase__ = True UpperCAmelCase__ = False UpperCAmelCase__ = True UpperCAmelCase__ = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) ) UpperCAmelCase__ = outputs.attentions UpperCAmelCase__ = len(self.model_tester.depths ) self.assertEqual(len(__a ) , __a ) # check that output_attentions also work using config del inputs_dict["output_attentions"] UpperCAmelCase__ = True UpperCAmelCase__ = config.window_size**2 UpperCAmelCase__ = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) ) UpperCAmelCase__ = outputs.attentions self.assertEqual(len(__a ) , __a ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) UpperCAmelCase__ = len(__a ) # Check attention is always last and order is fine UpperCAmelCase__ = True UpperCAmelCase__ = True UpperCAmelCase__ = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) ) if hasattr(self.model_tester , 'num_hidden_states_types' ): UpperCAmelCase__ = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states UpperCAmelCase__ = 2 self.assertEqual(out_len + added_hidden_states , len(__a ) ) UpperCAmelCase__ = outputs.attentions self.assertEqual(len(__a ) , __a ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def UpperCamelCase__ (self , __a , __a , __a , __a ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase__ = model_class(__a ) model.to(__a ) model.eval() with torch.no_grad(): UpperCAmelCase__ = model(**self._prepare_for_class(__a , __a ) ) UpperCAmelCase__ = outputs.hidden_states UpperCAmelCase__ = getattr( self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(__a ) , __a ) # Swinv2 has a different seq_length UpperCAmelCase__ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, 
config.patch_size) ) UpperCAmelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) UpperCAmelCase__ = outputs.reshaped_hidden_states self.assertEqual(len(__a ) , __a ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ = reshaped_hidden_states[0].shape UpperCAmelCase__ = ( reshaped_hidden_states[0].view(__a , __a , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def UpperCamelCase__ (self ) -> int: """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes: UpperCAmelCase__ = True self.check_hidden_states_output(__a , __a , __a , __a ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase__ = True self.check_hidden_states_output(__a , __a , __a , __a ) def UpperCamelCase__ (self ) -> Optional[Any]: """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ = 3 UpperCAmelCase__ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) UpperCAmelCase__ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) UpperCAmelCase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) UpperCAmelCase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: UpperCAmelCase__ = True self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase__ = True self.check_hidden_states_output(__a , __a , __a , (padded_height, padded_width) ) def UpperCamelCase__ (self ) -> Tuple: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*__a ) def UpperCamelCase__ (self ) -> str: """simple docstring""" UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*__a ) @slow def UpperCamelCase__ (self ) -> Dict: """simple docstring""" for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ = SwinvaModel.from_pretrained(__a ) self.assertIsNotNone(__a ) def UpperCamelCase__ (self ) -> List[str]: """simple docstring""" UpperCAmelCase__ , UpperCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ = _config_zero_init(__a ) for model_class in self.all_model_classes: UpperCAmelCase__ = model_class(config=__a ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=F"Parameter {name} of model {model_class} seems not properly initialized" , ) @require_vision @require_torch class lowercase ( unittest.TestCase ): 
'''simple docstring''' @cached_property def UpperCamelCase__ (self ) -> Union[str, Any]: """simple docstring""" return ( AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ) if is_vision_available() else None ) @slow def UpperCamelCase__ (self ) -> Optional[int]: """simple docstring""" UpperCAmelCase__ = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to( __a ) UpperCAmelCase__ = self.default_image_processor UpperCAmelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) UpperCAmelCase__ = image_processor(images=__a , return_tensors='pt' ).to(__a ) # forward pass with torch.no_grad(): UpperCAmelCase__ = model(**__a ) # verify the logits UpperCAmelCase__ = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , __a ) UpperCAmelCase__ = torch.tensor([-0.39_47, -0.43_06, 0.00_26] ).to(__a ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
335
import torch from torch import nn from ...configuration_utils import ConfigMixin, register_to_config from ...models import ModelMixin class lowercase ( _UpperCamelCase , _UpperCamelCase ): '''simple docstring''' @register_to_config def __init__(self , *, __a = 4 , __a = 768 , __a , __a , ) -> str: """simple docstring""" super().__init__() UpperCAmelCase__ = nn.Parameter(torch.zeros(__a ) ) # parameters for additional clip time embeddings UpperCAmelCase__ = nn.Linear(__a , __a ) UpperCAmelCase__ = nn.Linear(__a , __a ) # parameters for encoder hidden states UpperCAmelCase__ = clip_extra_context_tokens UpperCAmelCase__ = nn.Linear( __a , self.clip_extra_context_tokens * cross_attention_dim ) UpperCAmelCase__ = nn.Linear(__a , __a ) UpperCAmelCase__ = nn.LayerNorm(__a ) def UpperCamelCase__ (self , *, __a , __a , __a , __a ) -> Optional[Any]: """simple docstring""" if do_classifier_free_guidance: # Add the classifier free guidance embeddings to the image embeddings UpperCAmelCase__ = image_embeddings.shape[0] UpperCAmelCase__ = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 ) UpperCAmelCase__ = classifier_free_guidance_embeddings.expand( __a , -1 ) UpperCAmelCase__ = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 ) # The image embeddings batch size and the text embeddings batch size are equal assert image_embeddings.shape[0] == prompt_embeds.shape[0] UpperCAmelCase__ = prompt_embeds.shape[0] # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and # adding CLIP embeddings to the existing timestep embedding, ... UpperCAmelCase__ = self.embedding_proj(__a ) UpperCAmelCase__ = self.clip_image_embeddings_project_to_time_embeddings(__a ) UpperCAmelCase__ = time_projected_image_embeddings + time_projected_prompt_embeds # ... and by projecting CLIP embeddings into four # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder" UpperCAmelCase__ = self.clip_extra_context_tokens_proj(__a ) UpperCAmelCase__ = clip_extra_context_tokens.reshape(__a , -1 , self.clip_extra_context_tokens ) UpperCAmelCase__ = clip_extra_context_tokens.permute(0 , 2 , 1 ) UpperCAmelCase__ = self.encoder_hidden_states_proj(__a ) UpperCAmelCase__ = self.text_encoder_hidden_states_norm(__a ) UpperCAmelCase__ = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 ) return text_encoder_hidden_states, additive_clip_time_embeddings
335
1
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class snake_case_( a__ ): def __init__( self : str , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tuple ): super().__init__() # make sure scheduler can always be converted to DDIM lowerCAmelCase : Tuple = DDIMScheduler.from_config(scheduler.config ) self.register_modules(unet=UpperCamelCase_ , scheduler=UpperCamelCase_ ) @torch.no_grad() def __call__( self : List[str] , UpperCamelCase_ : int = 1 , UpperCamelCase_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , UpperCamelCase_ : float = 0.0 , UpperCamelCase_ : int = 5_0 , UpperCamelCase_ : Optional[bool] = None , UpperCamelCase_ : Optional[str] = "pil" , UpperCamelCase_ : bool = True , ): # Sample gaussian noise to begin loop if isinstance(self.unet.config.sample_size , UpperCamelCase_ ): lowerCAmelCase : Optional[int] = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: lowerCAmelCase : List[str] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(UpperCamelCase_ , UpperCamelCase_ ) and len(UpperCamelCase_ ) != batch_size: raise ValueError( F'''You have passed a list of generators of length {len(UpperCamelCase_ )}, but requested an effective batch''' F''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' ) lowerCAmelCase : Any = randn_tensor(UpperCamelCase_ , generator=UpperCamelCase_ , device=self.device , dtype=self.unet.dtype ) # set step values self.scheduler.set_timesteps(UpperCamelCase_ ) for t in self.progress_bar(self.scheduler.timesteps ): # 1. predict noise model_output lowerCAmelCase : Optional[int] = self.unet(UpperCamelCase_ , UpperCamelCase_ ).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 lowerCAmelCase : Optional[int] = self.scheduler.step( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , eta=UpperCamelCase_ , use_clipped_model_output=UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample lowerCAmelCase : Union[str, Any] = (image / 2 + 0.5).clamp(0 , 1 ) lowerCAmelCase : int = image.cpu().permute(0 , 2 , 3 , 1 ).numpy() if output_type == "pil": lowerCAmelCase : List[str] = self.numpy_to_pil(UpperCamelCase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=UpperCamelCase_ )
60
def euclidean_gcd(a: int, b: int) -> int:
    """Iterative Euclidean algorithm: gcd(a, b) == gcd(b, a % b)."""
    while b:
        a, b = b, a % b
    return a


def euclidean_gcd_recursive(a: int, b: int) -> int:
    """Recursive form of the same algorithm."""
    return a if b == 0 else euclidean_gcd_recursive(b, a % b)


def main() -> None:
    print(f"euclidean_gcd(3, 5) = {euclidean_gcd(3, 5)}")
    print(f"euclidean_gcd(5, 3) = {euclidean_gcd(5, 3)}")
    print(f"euclidean_gcd(1, 3) = {euclidean_gcd(1, 3)}")
    print(f"euclidean_gcd(3, 6) = {euclidean_gcd(3, 6)}")
    print(f"euclidean_gcd(6, 3) = {euclidean_gcd(6, 3)}")
    print(f"euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3, 5)}")
    print(f"euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5, 3)}")
    print(f"euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1, 3)}")
    print(f"euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3, 6)}")
    print(f"euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6, 3)}")


if __name__ == "__main__":
    main()
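A quick property check of the two implementations against the standard library:

# Property check: both implementations agree with math.gcd on random inputs.
import math
import random

for _ in range(100):
    a, b = random.randint(1, 1_000), random.randint(1, 1_000)
    assert euclidean_gcd(a, b) == euclidean_gcd_recursive(a, b) == math.gcd(a, b)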
18
0
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging __UpperCamelCase : Any = logging.get_logger(__name__) __UpperCamelCase : Optional[Any] = { '''microsoft/unispeech-sat-base-100h-libri-ft''': ( '''https://huggingface.co/microsoft/unispeech-sat-base-100h-libri-ft/resolve/main/config.json''' ), # See all UniSpeechSat models at https://huggingface.co/models?filter=unispeech_sat } class SCREAMING_SNAKE_CASE ( a_ ): """simple docstring""" lowercase__ = "unispeech-sat" def __init__( self : Optional[int] ,lowercase_ : Optional[Any]=3_2 ,lowercase_ : List[Any]=7_6_8 ,lowercase_ : int=1_2 ,lowercase_ : Tuple=1_2 ,lowercase_ : List[Any]=3_0_7_2 ,lowercase_ : Dict="gelu" ,lowercase_ : Any=0.1 ,lowercase_ : str=0.1 ,lowercase_ : Optional[Any]=0.1 ,lowercase_ : List[Any]=0.0 ,lowercase_ : str=0.0 ,lowercase_ : Tuple=0.1 ,lowercase_ : Optional[int]=0.1 ,lowercase_ : Tuple=0.02 ,lowercase_ : str=1E-5 ,lowercase_ : Any="group" ,lowercase_ : Optional[Any]="gelu" ,lowercase_ : int=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) ,lowercase_ : str=(5, 2, 2, 2, 2, 2, 2) ,lowercase_ : Tuple=(1_0, 3, 3, 3, 3, 2, 2) ,lowercase_ : Optional[int]=False ,lowercase_ : Tuple=1_2_8 ,lowercase_ : Any=1_6 ,lowercase_ : int=False ,lowercase_ : Dict=True ,lowercase_ : List[Any]=0.05 ,lowercase_ : str=1_0 ,lowercase_ : Dict=2 ,lowercase_ : Dict=0.0 ,lowercase_ : str=1_0 ,lowercase_ : int=0 ,lowercase_ : List[Any]=3_2_0 ,lowercase_ : Union[str, Any]=2 ,lowercase_ : Optional[Any]=0.1 ,lowercase_ : Tuple=1_0_0 ,lowercase_ : Tuple=2_5_6 ,lowercase_ : int=2_5_6 ,lowercase_ : str=0.1 ,lowercase_ : Optional[int]="mean" ,lowercase_ : str=False ,lowercase_ : List[Any]=False ,lowercase_ : Any=2_5_6 ,lowercase_ : str=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) ,lowercase_ : Optional[int]=(5, 3, 3, 1, 1) ,lowercase_ : str=(1, 2, 3, 1, 1) ,lowercase_ : List[str]=5_1_2 ,lowercase_ : List[str]=0 ,lowercase_ : Optional[Any]=1 ,lowercase_ : List[str]=2 ,lowercase_ : Any=5_0_4 ,**lowercase_ : Optional[int] ,): super().__init__(**lowercase_ ,pad_token_id=lowercase_ ,bos_token_id=lowercase_ ,eos_token_id=lowercase_ ) lowerCAmelCase__ : Any = hidden_size lowerCAmelCase__ : Union[str, Any] = feat_extract_norm lowerCAmelCase__ : Dict = feat_extract_activation lowerCAmelCase__ : Optional[Any] = list(lowercase_ ) lowerCAmelCase__ : str = list(lowercase_ ) lowerCAmelCase__ : Optional[int] = list(lowercase_ ) lowerCAmelCase__ : Dict = conv_bias lowerCAmelCase__ : Dict = num_conv_pos_embeddings lowerCAmelCase__ : List[Any] = num_conv_pos_embedding_groups lowerCAmelCase__ : Dict = len(self.conv_dim ) lowerCAmelCase__ : str = num_hidden_layers lowerCAmelCase__ : Optional[Any] = intermediate_size lowerCAmelCase__ : Any = hidden_act lowerCAmelCase__ : Any = num_attention_heads lowerCAmelCase__ : str = hidden_dropout lowerCAmelCase__ : List[str] = attention_dropout lowerCAmelCase__ : Any = activation_dropout lowerCAmelCase__ : Optional[Any] = feat_proj_dropout lowerCAmelCase__ : Any = final_dropout lowerCAmelCase__ : Tuple = layerdrop lowerCAmelCase__ : Optional[int] = layer_norm_eps lowerCAmelCase__ : Any = initializer_range lowerCAmelCase__ : Optional[Any] = vocab_size lowerCAmelCase__ : Any = num_clusters lowerCAmelCase__ : str = do_stable_layer_norm lowerCAmelCase__ : Dict = use_weighted_layer_sum if ( (len(self.conv_stride ) != self.num_feat_extract_layers) or (len(self.conv_kernel ) != self.num_feat_extract_layers) or (len(self.conv_dim ) != self.num_feat_extract_layers) 
): raise ValueError( '''Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` ==''' ''' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) =''' F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,' F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' ) # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 lowerCAmelCase__ : List[str] = apply_spec_augment lowerCAmelCase__ : Optional[int] = mask_time_prob lowerCAmelCase__ : Tuple = mask_time_length lowerCAmelCase__ : Union[str, Any] = mask_time_min_masks lowerCAmelCase__ : List[Any] = mask_feature_prob lowerCAmelCase__ : Optional[Any] = mask_feature_length lowerCAmelCase__ : List[Any] = mask_feature_min_masks # parameters for pretraining with codevector quantized representations lowerCAmelCase__ : Dict = num_codevectors_per_group lowerCAmelCase__ : Tuple = num_codevector_groups lowerCAmelCase__ : int = contrastive_logits_temperature lowerCAmelCase__ : Dict = feat_quantizer_dropout lowerCAmelCase__ : Any = num_negatives lowerCAmelCase__ : Dict = codevector_dim lowerCAmelCase__ : Dict = proj_codevector_dim lowerCAmelCase__ : Union[str, Any] = diversity_loss_weight # ctc loss lowerCAmelCase__ : List[str] = ctc_loss_reduction lowerCAmelCase__ : Optional[Any] = ctc_zero_infinity # SequenceClassification-specific parameter. Feel free to ignore for other classes. lowerCAmelCase__ : List[Any] = classifier_proj_size # XVector-specific parameters. Feel free to ignore for other classes. lowerCAmelCase__ : Dict = list(lowercase_ ) lowerCAmelCase__ : int = list(lowercase_ ) lowerCAmelCase__ : Dict = list(lowercase_ ) lowerCAmelCase__ : Tuple = xvector_output_dim @property def __lowerCAmelCase ( self : Dict ): return functools.reduce(operator.mul ,self.conv_stride ,1 )
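The closing property is just the product of the convolutional strides, i.e. the encoder's total downsampling factor; a standalone check using the default `conv_stride` tuple from the signature:

# Standalone check of the stride product computed by the closing property,
# using the default conv_stride tuple (5, 2, 2, 2, 2, 2, 2).
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
assert functools.reduce(operator.mul, conv_stride, 1) == 320  # 5 * 2**6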
364
"""simple docstring""" # tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. __UpperCamelCase : int = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def __SCREAMING_SNAKE_CASE ( A_ ): from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(A_ ) def __SCREAMING_SNAKE_CASE ( A_ ): from diffusers.utils.testing_utils import pytest_terminal_summary_main lowerCAmelCase__ : str = terminalreporter.config.getoption('''--make-reports''' ) if make_reports: pytest_terminal_summary_main(A_ , id=A_ )
74
0
'''simple docstring'''
import argparse

import torch

from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    '''Convert a TensorFlow MobileBERT checkpoint into a PyTorch model file.'''
    # Initialise a PyTorch model from the json configuration
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = MobileBertForPreTraining(config)
    # Load weights from the TensorFlow checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save the PyTorch model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        '--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
    )
    parser.add_argument(
        '--mobilebert_config_file',
        default=None,
        type=str,
        required=True,
        help=(
            'The config json file corresponding to the pre-trained MobileBERT model. \n'
            'This specifies the model architecture.'
        ),
    )
    parser.add_argument(
        '--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
3
'''simple docstring'''
import os


def solution():
    '''Return the first ten digits of the sum of the numbers listed in num.txt.'''
    file_path = os.path.join(os.path.dirname(__file__), 'num.txt')
    with open(file_path) as file_hand:
        return str(sum(int(line) for line in file_hand))[:10]


if __name__ == "__main__":
    print(solution())
3
1
'''simple docstring''' import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv('TEST_SAGEMAKER' ,'False' ) ) is not True ,reason='Skipping test because should only be run when releasing minor transformers version' ,) @pytest.mark.usefixtures('sm_env' ) @parameterized_class( [ { 'framework': 'pytorch', 'script': 'run_glue.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 650, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'pytorch', 'script': 'run_ddp.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 600, 'eval_accuracy': 0.7, 'eval_loss': 0.6}, }, { 'framework': 'tensorflow', 'script': 'run_tf_dist.py', 'model_name_or_path': 'distilbert-base-cased', 'instance_type': 'ml.p3.16xlarge', 'results': {'train_runtime': 600, 'eval_accuracy': 0.6, 'eval_loss': 0.7}, }, ] ) class A ( unittest.TestCase ): def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]: """simple docstring""" if self.framework == "pytorch": subprocess.run( F'cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'.split() , encoding='''utf-8''' , check=lowerCAmelCase_ , ) assert hasattr(self , '''env''' ) def __lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase_ : str ) -> Dict: """simple docstring""" _a = F'{self.env.base_job_name}-{instance_count}-{"ddp" if "ddp" in self.script else "smd"}' # distributed data settings _a = {'''smdistributed''': {'''dataparallel''': {'''enabled''': True}}} if self.script != '''run_ddp.py''' else None # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=lowerCAmelCase_ , instance_count=lowerCAmelCase_ , instance_type=self.instance_type , debugger_hook_config=lowerCAmelCase_ , hyperparameters={**self.env.distributed_hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , distribution=lowerCAmelCase_ , py_version='''py36''' , ) def __lowerCAmelCase ( self : List[Any] , lowerCAmelCase_ : List[str] ) -> Any: """simple docstring""" TrainingJobAnalytics(lowerCAmelCase_ ).export_csv(F'{self.env.test_path}/{job_name}_metrics.csv' ) @parameterized.expand([(2,)] ) def __lowerCAmelCase ( self : str , lowerCAmelCase_ : List[str] ) -> List[Any]: """simple docstring""" _a = self.create_estimator(lowerCAmelCase_ ) # run training estimator.fit() # result dataframe _a = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe() # extract kpis _a = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] ) _a = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] ) # get train time from SageMaker job, this includes starting, preprocessing, stopping _a = ( Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_99_99 ) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy ) assert all(t <= self.results['''eval_loss'''] for t in eval_loss ) # 
dump tests result into json file to share in PR with open(F'{estimator.latest_training_job.name}.json' , '''w''' ) as outfile: json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , lowerCAmelCase_ )
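The KPI check in the test above is plain dataframe filtering. A minimal, self-contained sketch of that step follows, assuming a stand-in pandas DataFrame with the same metric_name/value columns that TrainingJobAnalytics.dataframe() exposes; the metric values and thresholds are made up for illustration.

import pandas as pd

# stand-in for TrainingJobAnalytics(job_name).dataframe()
result_metrics_df = pd.DataFrame(
    {
        "metric_name": ["eval_accuracy", "eval_accuracy", "eval_loss", "eval_loss"],
        "value": [0.74, 0.76, 0.55, 0.52],
    }
)

eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])

# same assertion pattern as the test: every logged value must clear the target
assert all(acc >= 0.7 for acc in eval_accuracy)
assert all(loss <= 0.6 for loss in eval_loss)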
179
'''simple docstring''' import copy import inspect import unittest import numpy as np from huggingface_hub import hf_hub_download from transformers import VideoMAEConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEForPreTraining, VideoMAEForVideoClassification, VideoMAEModel, ) from transformers.models.videomae.modeling_videomae import VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from transformers import VideoMAEImageProcessor class A : def __init__( self : Any , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : List[str]=13 , lowerCAmelCase_ : Optional[Any]=10 , lowerCAmelCase_ : Optional[int]=3 , lowerCAmelCase_ : Union[str, Any]=2 , lowerCAmelCase_ : Tuple=2 , lowerCAmelCase_ : Dict=2 , lowerCAmelCase_ : str=True , lowerCAmelCase_ : Optional[Any]=True , lowerCAmelCase_ : Dict=32 , lowerCAmelCase_ : Tuple=5 , lowerCAmelCase_ : Optional[int]=4 , lowerCAmelCase_ : int=37 , lowerCAmelCase_ : int="gelu" , lowerCAmelCase_ : List[str]=0.1 , lowerCAmelCase_ : str=0.1 , lowerCAmelCase_ : Dict=10 , lowerCAmelCase_ : int=0.0_2 , lowerCAmelCase_ : Union[str, Any]=0.9 , lowerCAmelCase_ : str=None , ) -> int: """simple docstring""" _a = parent _a = batch_size _a = image_size _a = num_channels _a = patch_size _a = tubelet_size _a = num_frames _a = is_training _a = use_labels _a = hidden_size _a = num_hidden_layers _a = num_attention_heads _a = intermediate_size _a = hidden_act _a = hidden_dropout_prob _a = attention_probs_dropout_prob _a = type_sequence_label_size _a = initializer_range _a = mask_ratio _a = scope # in VideoMAE, the number of tokens equals num_frames/tubelet_size * num_patches per frame _a = (image_size // patch_size) ** 2 _a = (num_frames // tubelet_size) * self.num_patches_per_frame # use this variable to define bool_masked_pos _a = int(mask_ratio * self.seq_length ) def __lowerCAmelCase ( self : Any ) -> Optional[int]: """simple docstring""" _a = floats_tensor( [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] ) _a = None if self.use_labels: _a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) _a = self.get_config() return config, pixel_values, labels def __lowerCAmelCase ( self : List[str] ) -> Any: """simple docstring""" return VideoMAEConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , tubelet_size=self.tubelet_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , ) def __lowerCAmelCase ( self : List[str] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : Union[str, Any] ) -> Optional[int]: """simple docstring""" _a = VideoMAEModel(config=lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) 
model.eval() _a = model(lowerCAmelCase_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __lowerCAmelCase ( self : Any , lowerCAmelCase_ : Optional[int] , lowerCAmelCase_ : Dict , lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]: """simple docstring""" _a = VideoMAEForPreTraining(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch _a = torch.ones((self.num_masks,) ) _a = torch.cat([mask, torch.zeros(self.seq_length - mask.size(0 ) )] ) _a = mask.expand(self.batch_size , -1 ).bool() _a = model(lowerCAmelCase_ , lowerCAmelCase_ ) # model only returns predictions for masked patches _a = mask.sum().item() _a = 3 * self.tubelet_size * self.patch_size**2 self.parent.assertEqual(result.logits.shape , (self.batch_size, num_masked_patches, decoder_num_labels) ) def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" _a = self.prepare_config_and_inputs() _a , _a , _a = config_and_inputs _a = {'''pixel_values''': pixel_values} return config, inputs_dict @require_torch class A ( _a ,_a ,unittest.TestCase ): lowercase_ = ( (VideoMAEModel, VideoMAEForPreTraining, VideoMAEForVideoClassification) if is_torch_available() else () ) lowercase_ = ( {'feature-extraction': VideoMAEModel, 'video-classification': VideoMAEForVideoClassification} if is_torch_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False def __lowerCAmelCase ( self : Dict ) -> int: """simple docstring""" _a = VideoMAEModelTester(self ) _a = ConfigTester(self , config_class=lowerCAmelCase_ , has_text_modality=lowerCAmelCase_ , hidden_size=37 ) def __lowerCAmelCase ( self : Optional[int] , lowerCAmelCase_ : Any , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any]=False ) -> Tuple: """simple docstring""" _a = copy.deepcopy(lowerCAmelCase_ ) if model_class == VideoMAEForPreTraining: # important: each video needs to have the same number of masked patches # hence we define a single mask, which we then repeat for each example in the batch _a = torch.ones((self.model_tester.num_masks,) ) _a = torch.cat([mask, torch.zeros(self.model_tester.seq_length - mask.size(0 ) )] ) _a = mask.expand(self.model_tester.batch_size , -1 ).bool() _a = bool_masked_pos.to(lowerCAmelCase_ ) if return_labels: if model_class in [ *get_values(lowerCAmelCase_ ), ]: _a = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=lowerCAmelCase_ ) return inputs_dict def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]: """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason='''VideoMAE does not use inputs_embeds''' ) def __lowerCAmelCase ( self : Any ) -> Any: """simple docstring""" pass def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = model_class(lowerCAmelCase_ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) _a = model.get_output_embeddings() self.assertTrue(x is None or isinstance(lowerCAmelCase_ , nn.Linear ) ) def __lowerCAmelCase ( self : Any ) -> Any: """simple docstring""" _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = model_class(lowerCAmelCase_ ) _a = 
inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _a = [*signature.parameters.keys()] _a = ['''pixel_values'''] self.assertListEqual(arg_names[:1] , lowerCAmelCase_ ) def __lowerCAmelCase ( self : int ) -> Any: """simple docstring""" _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCAmelCase_ ) def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]: """simple docstring""" _a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*lowerCAmelCase_ ) @slow def __lowerCAmelCase ( self : Dict ) -> Optional[int]: """simple docstring""" for model_name in VIDEOMAE_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _a = VideoMAEModel.from_pretrained(lowerCAmelCase_ ) self.assertIsNotNone(lowerCAmelCase_ ) def __lowerCAmelCase ( self : int ) -> Any: """simple docstring""" if not self.has_attentions: pass else: _a , _a = self.model_tester.prepare_config_and_inputs_for_common() _a = True for model_class in self.all_model_classes: _a = self.model_tester.seq_length - self.model_tester.num_masks _a = ( num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length ) _a = True _a = False _a = True _a = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() with torch.no_grad(): _a = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) _a = outputs.attentions self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers ) # check that output_attentions also work using config del inputs_dict["output_attentions"] _a = True _a = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() with torch.no_grad(): _a = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) _a = outputs.attentions self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) _a = len(lowerCAmelCase_ ) # Check attention is always last and order is fine _a = True _a = True _a = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() with torch.no_grad(): _a = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) self.assertEqual(out_len + 1 , len(lowerCAmelCase_ ) ) _a = outputs.attentions self.assertEqual(len(lowerCAmelCase_ ) , self.model_tester.num_hidden_layers ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , ) def __lowerCAmelCase ( self : Tuple ) -> Tuple: """simple docstring""" def check_hidden_states_output(lowerCAmelCase_ : str , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Tuple ): _a = model_class(lowerCAmelCase_ ) model.to(lowerCAmelCase_ ) model.eval() with torch.no_grad(): _a = model(**self._prepare_for_class(lowerCAmelCase_ , lowerCAmelCase_ ) ) _a = outputs.hidden_states _a = self.model_tester.num_hidden_layers + 1 self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ ) _a = self.model_tester.seq_length - self.model_tester.num_masks _a = num_visible_patches if model_class == VideoMAEForPreTraining else self.model_tester.seq_length self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , ) _a , _a = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _a = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ 
) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _a = True check_hidden_states_output(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) @unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' ) def __lowerCAmelCase ( self : Optional[int] ) -> Dict: """simple docstring""" pass def snake_case_ (): '''simple docstring''' _a = hf_hub_download( repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' ) _a = np.load(UpperCamelCase ) return list(UpperCamelCase ) @require_torch @require_vision class A ( unittest.TestCase ): @cached_property def __lowerCAmelCase ( self : str ) -> List[Any]: """simple docstring""" return ( VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] ) if is_vision_available() else None ) @slow def __lowerCAmelCase ( self : Dict ) -> Dict: """simple docstring""" _a = VideoMAEForVideoClassification.from_pretrained('''MCG-NJU/videomae-base-finetuned-kinetics''' ).to( lowerCAmelCase_ ) _a = self.default_image_processor _a = prepare_video() _a = image_processor(lowerCAmelCase_ , return_tensors='''pt''' ).to(lowerCAmelCase_ ) # forward pass with torch.no_grad(): _a = model(**lowerCAmelCase_ ) # verify the logits _a = torch.Size((1, 4_00) ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) _a = torch.tensor([0.3_6_6_9, -0.0_6_8_8, -0.2_4_2_1] ).to(lowerCAmelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1e-4 ) ) @slow def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]: """simple docstring""" _a = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' ).to(lowerCAmelCase_ ) _a = self.default_image_processor _a = prepare_video() _a = image_processor(lowerCAmelCase_ , return_tensors='''pt''' ).to(lowerCAmelCase_ ) # add boolean mask, indicating which patches to mask _a = hf_hub_download(repo_id='''hf-internal-testing/bool-masked-pos''' , filename='''bool_masked_pos.pt''' ) _a = torch.load(lowerCAmelCase_ ) # forward pass with torch.no_grad(): _a = model(**lowerCAmelCase_ ) # verify the logits _a = torch.Size([1, 14_08, 15_36] ) _a = torch.tensor( [[0.7_9_9_4, 0.9_6_1_2, 0.8_5_0_8], [0.7_4_0_1, 0.8_9_5_8, 0.8_3_0_2], [0.5_8_6_2, 0.7_4_6_8, 0.7_3_2_5]] , device=lowerCAmelCase_ ) self.assertEqual(outputs.logits.shape , lowerCAmelCase_ ) self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , lowerCAmelCase_ , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `True`) _a = torch.tensor([0.5_1_4_2] , device=lowerCAmelCase_ ) self.assertTrue(torch.allclose(outputs.loss , lowerCAmelCase_ , atol=1e-4 ) ) # verify the loss (`config.norm_pix_loss` = `False`) _a = VideoMAEForPreTraining.from_pretrained('''MCG-NJU/videomae-base-short''' , norm_pix_loss=lowerCAmelCase_ ).to( lowerCAmelCase_ ) with torch.no_grad(): _a = model(**lowerCAmelCase_ ) _a = torch.tensor(torch.tensor([0.6_4_6_9] ) , device=lowerCAmelCase_ ) self.assertTrue(torch.allclose(outputs.loss , lowerCAmelCase_ , atol=1e-4 ) )
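A sketch of the bool_masked_pos construction that both the pretraining check and _prepare_for_class rely on above: one shared 1-D mask is built, because every video in the batch must mask the same number of patches, and is then broadcast across the batch. The token counts below are illustrative real-model sizes, not the tester's tiny defaults.

import torch

# e.g. 224/16 = 14 -> 196 patches per frame, 16 frames / tubelet 2 = 8 -> 1568 tokens,
# of which 90% are masked
batch_size, seq_length, num_masks = 2, 1568, 1411

mask = torch.ones((num_masks,))
mask = torch.cat([mask, torch.zeros(seq_length - mask.size(0))])
bool_masked_pos = mask.expand(batch_size, -1).bool()

assert bool_masked_pos.shape == (batch_size, seq_length)
assert int(bool_masked_pos[0].sum()) == num_masks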
179
1
from __future__ import annotations A__ = tuple[int, int, int] A__ = tuple[str, str, str] # used alphabet -------------------------- # from string.ascii_uppercase A__ = """ABCDEFGHIJKLMNOPQRSTUVWXYZ""" # -------------------------- default selection -------------------------- # rotors -------------------------- A__ = """EGZWVONAHDCLFQMSIPJBYUKXTR""" A__ = """FOBHMDKEXQNRAULPGSJVTYICZW""" A__ = """ZJXESIUQLHAVRMDOYGTNFWPBKC""" # reflector -------------------------- A__ = { """A""": """N""", """N""": """A""", """B""": """O""", """O""": """B""", """C""": """P""", """P""": """C""", """D""": """Q""", """Q""": """D""", """E""": """R""", """R""": """E""", """F""": """S""", """S""": """F""", """G""": """T""", """T""": """G""", """H""": """U""", """U""": """H""", """I""": """V""", """V""": """I""", """J""": """W""", """W""": """J""", """K""": """X""", """X""": """K""", """L""": """Y""", """Y""": """L""", """M""": """Z""", """Z""": """M""", } # -------------------------- extra rotors -------------------------- A__ = """RMDJXFUWGISLHVTCQNKYPBEZOA""" A__ = """SGLCPQWZHKXAREONTFBVIYJUDM""" A__ = """HVSICLTYKQUBXDWAJZOMFGPREN""" A__ = """RZWQHFMVDBKICJLNTUXAGYPSOE""" A__ = """LFKIJODBEGAMQPXVUHYSTCZRWN""" A__ = """KOAEGVDHXPQZMLFTYWJNBRCIUS""" def _UpperCAmelCase ( snake_case , snake_case , snake_case ): """simple docstring""" if (unique_rotsel := len(set(snake_case ) )) < 3: _lowerCAmelCase = F'Please use 3 unique rotors (not {unique_rotsel})' raise Exception(snake_case ) # Checks if rotor positions are valid _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = rotpos if not 0 < rotorposa <= len(snake_case ): _lowerCAmelCase = F'First rotor position is not within range of 1..26 ({rotorposa}' raise ValueError(snake_case ) if not 0 < rotorposa <= len(snake_case ): _lowerCAmelCase = F'Second rotor position is not within range of 1..26 ({rotorposa})' raise ValueError(snake_case ) if not 0 < rotorposa <= len(snake_case ): _lowerCAmelCase = F'Third rotor position is not within range of 1..26 ({rotorposa})' raise ValueError(snake_case ) # Validates string and returns dict _lowerCAmelCase = _plugboard(snake_case ) return rotpos, rotsel, pbdict def _UpperCAmelCase ( snake_case ): """simple docstring""" if not isinstance(snake_case , snake_case ): _lowerCAmelCase = F'Plugboard setting isn\'t type string ({type(snake_case )})' raise TypeError(snake_case ) elif len(snake_case ) % 2 != 0: _lowerCAmelCase = F'Odd number of symbols ({len(snake_case )})' raise Exception(snake_case ) elif pbstring == "": return {} pbstring.replace(""" """ , """""" ) # Checks if all characters are unique _lowerCAmelCase = set() for i in pbstring: if i not in abc: _lowerCAmelCase = F'\'{i}\' not in list of symbols' raise Exception(snake_case ) elif i in tmppbl: _lowerCAmelCase = F'Duplicate symbol ({i})' raise Exception(snake_case ) else: tmppbl.add(snake_case ) del tmppbl # Created the dictionary _lowerCAmelCase = {} for j in range(0 , len(snake_case ) - 1 , 2 ): _lowerCAmelCase = pbstring[j + 1] _lowerCAmelCase = pbstring[j] return pb def _UpperCAmelCase ( snake_case , snake_case , snake_case = (rotora, rotora, rotora) , snake_case = "" , ): """simple docstring""" _lowerCAmelCase = text.upper() _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = _validator( snake_case , snake_case , plugb.upper() ) _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = rotor_position _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = rotor_selection rotorposa -= 1 rotorposa -= 1 rotorposa -= 1 _lowerCAmelCase = [] # encryption/decryption process 
-------------------------- for symbol in text: if symbol in abc: # 1st plugboard -------------------------- if symbol in plugboard: _lowerCAmelCase = plugboard[symbol] # rotor ra -------------------------- _lowerCAmelCase = abc.index(snake_case ) + rotorposa _lowerCAmelCase = rotora[index % len(snake_case )] # rotor rb -------------------------- _lowerCAmelCase = abc.index(snake_case ) + rotorposa _lowerCAmelCase = rotora[index % len(snake_case )] # rotor rc -------------------------- _lowerCAmelCase = abc.index(snake_case ) + rotorposa _lowerCAmelCase = rotora[index % len(snake_case )] # reflector -------------------------- # this is the reason you don't need another machine to decipher _lowerCAmelCase = reflector[symbol] # 2nd rotors _lowerCAmelCase = abc[rotora.index(snake_case ) - rotorposa] _lowerCAmelCase = abc[rotora.index(snake_case ) - rotorposa] _lowerCAmelCase = abc[rotora.index(snake_case ) - rotorposa] # 2nd plugboard if symbol in plugboard: _lowerCAmelCase = plugboard[symbol] # moves/resets rotor positions rotorposa += 1 if rotorposa >= len(snake_case ): _lowerCAmelCase = 0 rotorposa += 1 if rotorposa >= len(snake_case ): _lowerCAmelCase = 0 rotorposa += 1 if rotorposa >= len(snake_case ): _lowerCAmelCase = 0 # else: # pass # Error could be also raised # raise ValueError( # 'Invalid symbol('+repr(symbol)+')') result.append(snake_case ) return "".join(snake_case ) if __name__ == "__main__": A__ = """This is my Python script that emulates the Enigma machine from WWII.""" A__ = (1, 1, 1) A__ = """pictures""" A__ = (rotora, rotora, rotora) A__ = enigma(message, rotor_pos, rotor_sel, pb) print("""Encrypted message:""", en) print("""Decrypted message:""", enigma(en, rotor_pos, rotor_sel, pb))
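A self-contained toy sketch of why the machine above decrypts with the same settings: the reflector is a fixed-point-free involution, so the forward-reflect-backward signal path is its own inverse. The six-letter alphabet, rotor wiring and offset here are invented for illustration and are not the module's actual constants.

abc = "ABCDEF"
rotor = "CFADEB"  # toy rotor wiring: a permutation of the toy alphabet
reflector = {"A": "D", "D": "A", "B": "E", "E": "B", "C": "F", "F": "C"}

def press(symbol: str, offset: int) -> str:
    # forward through the rotor, bounce off the reflector, back through the rotor
    symbol = rotor[(abc.index(symbol) + offset) % len(abc)]
    symbol = reflector[symbol]
    return abc[(rotor.index(symbol) - offset) % len(abc)]

plain = "BADCAFE"
cipher = "".join(press(c, 2) for c in plain)
assert "".join(press(c, 2) for c in cipher) == plain  # same settings decrypt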
82
from __future__ import annotations

import math


def is_prime(number: int) -> bool:
    """simple docstring"""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def list_truncated_nums(n: int) -> list[int]:
    """simple docstring"""
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums


def validate(n: int) -> bool:
    """simple docstring"""
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True


def compute_truncated_primes(count: int = 11) -> list[int]:
    """simple docstring"""
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes


def solution() -> int:
    """simple docstring"""
    return sum(compute_truncated_primes(11))


if __name__ == "__main__":
    print(f"{sum(compute_truncated_primes(11)) = }")
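A worked trace of the truncation helper for 3797, the classic two-sided truncatable prime: the loop below mirrors the helper's interleaving of left and right truncations.

n = 3797
s = str(n)
truncations = [n]
for i in range(1, len(s)):
    truncations.append(int(s[i:]))   # drop i digits from the left
    truncations.append(int(s[:-i]))  # drop i digits from the right
assert truncations == [3797, 797, 379, 97, 37, 7, 3]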
82
1
"""simple docstring""" def lowerCAmelCase_ ( __lowerCAmelCase )-> float: '''simple docstring''' if edge <= 0 or not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): raise ValueError('''Length must be a positive.''' ) return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2) def lowerCAmelCase_ ( __lowerCAmelCase )-> float: '''simple docstring''' if edge <= 0 or not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ): raise ValueError('''Length must be a positive.''' ) return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3) if __name__ == "__main__": import doctest doctest.testmod()
366
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __snake_case = { '''configuration_llama''': ['''LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LlamaConfig'''], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''LlamaTokenizer'''] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = ['''LlamaTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __snake_case = [ '''LlamaForCausalLM''', '''LlamaModel''', '''LlamaPreTrainedModel''', '''LlamaForSequenceClassification''', ] if TYPE_CHECKING: from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama import LlamaTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_llama_fast import LlamaTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel else: import sys __snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
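A minimal stand-alone sketch of the lazy-import pattern this init file delegates to _LazyModule: attribute access triggers the real import and caches the result, so importing the package stays cheap. This is a simplified illustration, not transformers' actual implementation.

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict) -> None:
        super().__init__(name)
        # map each exported symbol to the module that actually defines it
        self._symbol_to_module = {
            sym: mod for mod, syms in import_structure.items() for sym in syms
        }

    def __getattr__(self, attr: str):
        mapping = self.__dict__.get("_symbol_to_module", {})
        if attr not in mapping:
            raise AttributeError(attr)
        value = getattr(importlib.import_module(mapping[attr]), attr)
        setattr(self, attr, value)  # cache: later lookups bypass __getattr__
        return value


shim = LazyModule("shim", {"json": ["dumps"]})
assert shim.dumps({"ok": True}) == '{"ok": true}'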
78
0
import argparse import torch from transformers import BlenderbotConfig, BlenderbotForConditionalGeneration from transformers.utils import logging logging.set_verbosity_info() lowerCamelCase : Optional[int] = logging.get_logger(__name__) lowerCamelCase : int = [ ['''attention''', '''attn'''], ['''encoder_attention''', '''encoder_attn'''], ['''q_lin''', '''q_proj'''], ['''k_lin''', '''k_proj'''], ['''v_lin''', '''v_proj'''], ['''out_lin''', '''out_proj'''], ['''norm_embeddings''', '''layernorm_embedding'''], ['''position_embeddings''', '''embed_positions'''], ['''embeddings''', '''embed_tokens'''], ['''ffn.lin''', '''fc'''], ] def snake_case_ ( lowerCAmelCase_ : str ): if k == "embeddings.weight": return "shared.weight" for parlai_name, hf_name in PATTERNS: __lowercase : Tuple = k.replace(lowerCAmelCase_ , lowerCAmelCase_ ) if k.startswith("""encoder""" ): __lowercase : Union[str, Any] = k.replace(""".attn""" , """.self_attn""" ) __lowercase : Any = k.replace("""norm1""" , """self_attn_layer_norm""" ) __lowercase : Any = k.replace("""norm2""" , """final_layer_norm""" ) elif k.startswith("""decoder""" ): __lowercase : Optional[int] = k.replace("""norm1""" , """self_attn_layer_norm""" ) __lowercase : Tuple = k.replace("""norm2""" , """encoder_attn_layer_norm""" ) __lowercase : Tuple = k.replace("""norm3""" , """final_layer_norm""" ) return k def snake_case_ ( lowerCAmelCase_ : int ): __lowercase : List[str] = [ """model.encoder.layernorm_embedding.weight""", """model.encoder.layernorm_embedding.bias""", """model.decoder.layernorm_embedding.weight""", """model.decoder.layernorm_embedding.bias""", ] for k in keys: __lowercase : Optional[int] = sd.pop(lowerCAmelCase_ ) __lowercase : List[Any] = k.replace("""layernorm_embedding""" , """layer_norm""" ) assert new_k not in sd __lowercase : Optional[Any] = v lowerCamelCase : Any = ['''START'''] @torch.no_grad() def snake_case_ ( lowerCAmelCase_ : Union[str, Any] , lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : str ): __lowercase : str = torch.load(lowerCAmelCase_ , map_location="""cpu""" ) __lowercase : Optional[int] = model["""model"""] __lowercase : Optional[Any] = BlenderbotConfig.from_json_file(lowerCAmelCase_ ) __lowercase : str = BlenderbotForConditionalGeneration(lowerCAmelCase_ ) __lowercase : Tuple = m.model.state_dict().keys() __lowercase : str = [] __lowercase : int = {} for k, v in sd.items(): if k in IGNORE_KEYS: continue __lowercase : Any = rename_state_dict_key(lowerCAmelCase_ ) if new_k not in valid_keys: failures.append([k, new_k] ) else: __lowercase : Optional[Any] = v if cfg.normalize_before: # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm rename_layernorm_keys(lowerCAmelCase_ ) m.model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ ) m.half() m.save_pretrained(lowerCAmelCase_ ) if __name__ == "__main__": lowerCamelCase : Optional[Any] = argparse.ArgumentParser() # Required parameters parser.add_argument('''--src_path''', type=str, help='''like blenderbot-model.bin''') parser.add_argument('''--save_dir''', default='''hf_blenderbot''', type=str, help='''Where to save converted model.''') parser.add_argument( '''--hf_config_json''', default='''blenderbot-3b-config.json''', type=str, help='''Path to config to use''' ) lowerCamelCase : Union[str, Any] = parser.parse_args() convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
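A self-contained sketch of the key-renaming step above, using a trimmed pattern table and an invented ParlAI-style key; the real converter applies the full PATTERNS list plus the encoder/decoder-specific layer-norm renames.

PATTERNS = [
    ["attention", "attn"],
    ["q_lin", "q_proj"],
    ["norm_embeddings", "layernorm_embedding"],
]

def rename_key(k: str) -> str:
    # substring substitutions first, then position-dependent renames
    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)
    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn").replace("norm1", "self_attn_layer_norm")
    return k

assert rename_key("encoder.layers.0.attention.q_lin.weight") == (
    "encoder.layers.0.self_attn.q_proj.weight"
)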
233
from __future__ import annotations import math lowerCamelCase : List[Any] = '''2020.9.26''' lowerCamelCase : str = '''xcodz-dot, cclaus, dhruvmanila''' def snake_case_ ( lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : float ): if not all(isinstance(lowerCAmelCase_ , (float, int) ) for val in locals().values() ): __lowercase : str = F"Input values must either be float or int: {list(locals().values() )}" raise TypeError(lowerCAmelCase_ ) __lowercase : List[Any] = ((x * distance) / (z + distance)) * scale __lowercase : Tuple = ((y * distance) / (z + distance)) * scale return projected_x, projected_y def snake_case_ ( lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : float , lowerCAmelCase_ : str , lowerCAmelCase_ : float ): if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ): raise TypeError("""Axis must be a str""" ) __lowercase : Optional[int] = locals() del input_variables["axis"] if not all(isinstance(lowerCAmelCase_ , (float, int) ) for val in input_variables.values() ): __lowercase : List[str] = ( """Input values except axis must either be float or int: """ F"{list(input_variables.values() )}" ) raise TypeError(lowerCAmelCase_ ) __lowercase : Tuple = (angle % 360) / 450 * 180 / math.pi if axis == "z": __lowercase : int = x * math.cos(lowerCAmelCase_ ) - y * math.sin(lowerCAmelCase_ ) __lowercase : Tuple = y * math.cos(lowerCAmelCase_ ) + x * math.sin(lowerCAmelCase_ ) __lowercase : Union[str, Any] = z elif axis == "x": __lowercase : str = y * math.cos(lowerCAmelCase_ ) - z * math.sin(lowerCAmelCase_ ) __lowercase : Dict = z * math.cos(lowerCAmelCase_ ) + y * math.sin(lowerCAmelCase_ ) __lowercase : List[str] = x elif axis == "y": __lowercase : List[str] = x * math.cos(lowerCAmelCase_ ) - z * math.sin(lowerCAmelCase_ ) __lowercase : List[str] = z * math.cos(lowerCAmelCase_ ) + x * math.sin(lowerCAmelCase_ ) __lowercase : List[Any] = y else: raise ValueError("""not a valid axis, choose one of 'x', 'y', 'z'""" ) return new_x, new_y, new_z if __name__ == "__main__": import doctest doctest.testmod() print(f'''{convert_to_ad(1.0, 2.0, 3.0, 10.0, 10.0) = }''') print(f'''{rotate(1.0, 2.0, 3.0, "y", 90.0) = }''')
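A worked example of the perspective projection formula above, using the same inputs as the module's own demo: a point at x=1, y=2, z=3 with viewing distance 10 and scale 10 lands at ((x*d)/(z+d)*s, (y*d)/(z+d)*s) = (100/13, 200/13).

x, y, z, scale, distance = 1.0, 2.0, 3.0, 10.0, 10.0
projected_x = ((x * distance) / (z + distance)) * scale
projected_y = ((y * distance) / (z + distance)) * scale
assert (round(projected_x, 4), round(projected_y, 4)) == (7.6923, 15.3846)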
233
1
def split(string: str, separator: str = " ") -> list:
    split_words = []
    last_index = 0
    for index, char in enumerate(string):
        if char == separator:
            split_words.append(string[last_index:index])
            last_index = index + 1
        elif index + 1 == len(string):
            split_words.append(string[last_index : index + 1])
    return split_words


if __name__ == "__main__":
    from doctest import testmod

    testmod()
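A usage sketch for the splitter above, covering the default space separator and an explicit one; note the trailing chunk is emitted by the elif branch rather than a final append.

assert split("Hello world") == ["Hello", "world"]
assert split("apple#banana#cherry", separator="#") == ["apple", "banana", "cherry"]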
285
def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)


def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    def count_of_possible_combinations_with_dp_array(target: int, dp_array: list[int]) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)


def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
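A worked example of the bottom-up variant for target 5 over [1, 2, 5]: the table counts ordered compositions, so dp[5] = 9 (1+1+1+1+1, 1+1+1+2 in four orders, 1+2+2 in three orders, and 5 itself).

target, array = 5, [1, 2, 5]
dp = [0] * (target + 1)
dp[0] = 1  # one way to reach zero: pick nothing
for i in range(1, target + 1):
    for item in array:
        if i - item >= 0:
            dp[i] += dp[i - item]
assert dp == [1, 1, 2, 3, 5, 9]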
285
1
"""simple docstring""" import itertools import string from collections.abc import Generator, Iterable def lowercase ( _SCREAMING_SNAKE_CASE : Iterable[str] , _SCREAMING_SNAKE_CASE : int ): '''simple docstring''' _UpperCAmelCase = iter(_UpperCamelCase ) while True: _UpperCAmelCase = tuple(itertools.islice(_UpperCamelCase , _UpperCamelCase ) ) if not chunk: return yield chunk def lowercase ( _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = ''''''.join([c.upper() for c in dirty if c in string.ascii_letters] ) _UpperCAmelCase = '''''' if len(_UpperCamelCase ) < 2: return dirty for i in range(len(_UpperCamelCase ) - 1 ): clean += dirty[i] if dirty[i] == dirty[i + 1]: clean += "X" clean += dirty[-1] if len(_UpperCamelCase ) & 1: clean += "X" return clean def lowercase ( _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = '''ABCDEFGHIKLMNOPQRSTUVWXYZ''' # we're using a list instead of a '2d' array because it makes the math # for setting up the table and doing the actual encoding/decoding simpler _UpperCAmelCase = [] # copy key chars into the table if they are in `alphabet` ignoring duplicates for char in key.upper(): if char not in table and char in alphabet: table.append(_UpperCamelCase ) # fill the rest of the table in with the remaining alphabet chars for char in alphabet: if char not in table: table.append(_UpperCamelCase ) return table def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = generate_table(_UpperCamelCase ) _UpperCAmelCase = prepare_input(_UpperCamelCase ) _UpperCAmelCase = '''''' # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(_UpperCamelCase , 2 ): _UpperCAmelCase , _UpperCAmelCase = divmod(table.index(_UpperCamelCase ) , 5 ) _UpperCAmelCase , _UpperCAmelCase = divmod(table.index(_UpperCamelCase ) , 5 ) if rowa == rowa: ciphertext += table[rowa * 5 + (cola + 1) % 5] ciphertext += table[rowa * 5 + (cola + 1) % 5] elif cola == cola: ciphertext += table[((rowa + 1) % 5) * 5 + cola] ciphertext += table[((rowa + 1) % 5) * 5 + cola] else: # rectangle ciphertext += table[rowa * 5 + cola] ciphertext += table[rowa * 5 + cola] return ciphertext def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : str ): '''simple docstring''' _UpperCAmelCase = generate_table(_UpperCamelCase ) _UpperCAmelCase = '''''' # https://en.wikipedia.org/wiki/Playfair_cipher#Description for chara, chara in chunker(_UpperCamelCase , 2 ): _UpperCAmelCase , _UpperCAmelCase = divmod(table.index(_UpperCamelCase ) , 5 ) _UpperCAmelCase , _UpperCAmelCase = divmod(table.index(_UpperCamelCase ) , 5 ) if rowa == rowa: plaintext += table[rowa * 5 + (cola - 1) % 5] plaintext += table[rowa * 5 + (cola - 1) % 5] elif cola == cola: plaintext += table[((rowa - 1) % 5) * 5 + cola] plaintext += table[((rowa - 1) % 5) * 5 + cola] else: # rectangle plaintext += table[rowa * 5 + cola] plaintext += table[rowa * 5 + cola] return plaintext
260
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( HubertConfig, HubertForCTC, HubertModel, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaProcessor, logging, ) logging.set_verbosity_info() SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "w2v_model.layer_norm": "feature_projection.layer_norm", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", } def lowerCAmelCase__ ( _UpperCamelCase : Optional[int] , _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[Any] ) -> Union[str, Any]: """simple docstring""" for attribute in key.split('.' ): snake_case = getattr(_UpperCamelCase , _UpperCamelCase ) if weight_type is not None: snake_case = getattr(_UpperCamelCase , _UpperCamelCase ).shape else: snake_case = hf_pointer.shape assert hf_shape == value.shape, ( f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be""" f""" {value.shape} for {full_name}""" ) if weight_type == "weight": snake_case = value elif weight_type == "weight_g": snake_case = value elif weight_type == "weight_v": snake_case = value elif weight_type == "bias": snake_case = value else: snake_case = value logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""" ) def lowerCAmelCase__ ( _UpperCamelCase : List[Any] , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : List[Any] ) -> List[Any]: """simple docstring""" snake_case = [] snake_case = fairseq_model.state_dict() snake_case = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor for name, value in fairseq_dict.items(): snake_case = False if "conv_layers" in name: load_conv_layer( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == 'group' , ) snake_case = True else: for key, mapped_key in MAPPING.items(): snake_case = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned): snake_case = True if "*" in mapped_key: snake_case = name.split(_UpperCamelCase )[0].split('.' 
)[-2] snake_case = mapped_key.replace('*' , _UpperCamelCase ) if "weight_g" in name: snake_case = 'weight_g' elif "weight_v" in name: snake_case = 'weight_v' elif "weight" in name: snake_case = 'weight' elif "bias" in name: snake_case = 'bias' else: snake_case = None set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) continue if not is_used: unused_weights.append(_UpperCamelCase ) logger.warning(f"""Unused weights: {unused_weights}""" ) def lowerCAmelCase__ ( _UpperCamelCase : Dict , _UpperCamelCase : Dict , _UpperCamelCase : Tuple , _UpperCamelCase : Union[str, Any] , _UpperCamelCase : Any ) -> Any: """simple docstring""" snake_case = full_name.split('conv_layers.' )[-1] snake_case = name.split('.' ) snake_case = int(items[0] ) snake_case = int(items[1] ) if type_id == 0: if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" ) snake_case = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" ) snake_case = value logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, ( f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was""" " found." 
) snake_case = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) elif "weight" in name: assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, ( f"""{full_name} has size {value.shape}, but""" f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.""" ) snake_case = value logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" ) else: unused_weights.append(_UpperCamelCase ) @torch.no_grad() def lowerCAmelCase__ ( _UpperCamelCase : str , _UpperCamelCase : Any , _UpperCamelCase : List[Any]=None , _UpperCamelCase : Any=None , _UpperCamelCase : Union[str, Any]=True ) -> List[Any]: """simple docstring""" if config_path is not None: snake_case = HubertConfig.from_pretrained(_UpperCamelCase ) else: snake_case = HubertConfig() if is_finetuned: if dict_path: snake_case = Dictionary.load(_UpperCamelCase ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq snake_case = target_dict.pad_index snake_case = target_dict.bos_index snake_case = target_dict.eos_index snake_case = len(target_dict.symbols ) snake_case = os.path.join(_UpperCamelCase , 'vocab.json' ) if not os.path.isdir(_UpperCamelCase ): logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(_UpperCamelCase ) ) return os.makedirs(_UpperCamelCase , exist_ok=_UpperCamelCase ) with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as vocab_handle: json.dump(target_dict.indices , _UpperCamelCase ) snake_case = WavaVecaCTCTokenizer( _UpperCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=_UpperCamelCase , ) snake_case = True if config.feat_extract_norm == 'layer' else False snake_case = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_6_0_0_0 , padding_value=0 , do_normalize=_UpperCamelCase , return_attention_mask=_UpperCamelCase , ) snake_case = WavaVecaProcessor(feature_extractor=_UpperCamelCase , tokenizer=_UpperCamelCase ) processor.save_pretrained(_UpperCamelCase ) snake_case = HubertForCTC(_UpperCamelCase ) else: snake_case = HubertModel(_UpperCamelCase ) if is_finetuned: snake_case ,snake_case ,snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} ) else: snake_case ,snake_case ,snake_case = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] ) snake_case = model[0].eval() recursively_load_weights(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) hf_wavavec.save_pretrained(_UpperCamelCase ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) SCREAMING_SNAKE_CASE__ = parser.parse_args() convert_hubert_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, 
args.dict_path, not args.not_finetuned )
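A minimal sketch of the dotted-key pointer walk that the converter's set_recursively step performs, on a toy module; the shape guard mirrors the converter's assertion. The helper name and toy model are invented for illustration.

import torch
from torch import nn

model = nn.Sequential(nn.Linear(4, 4))

def set_by_dotted_key(root: nn.Module, key: str, value: torch.Tensor) -> None:
    # walk "0.weight" -> model[0] -> .weight, then overwrite the tensor in place
    pointer = root
    for attribute in key.split("."):
        pointer = getattr(pointer, attribute)
    assert pointer.shape == value.shape  # same shape guard as the converter
    pointer.data = value

set_by_dotted_key(model, "0.weight", torch.zeros(4, 4))
assert bool((model[0].weight == 0).all())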
150
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) __A : Optional[Any] = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Tuple = ['NllbTokenizer'] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[Any] = ['NllbTokenizerFast'] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb import NllbTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_nllb_fast import NllbTokenizerFast else: import sys __A : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
361
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel from transformers.utils import logging logging.set_verbosity_info() __A : List[str] = logging.get_logger(__name__) def UpperCamelCase_ ( A__ : Union[str, Any] , A__ : Tuple=False ): '''simple docstring''' lowerCAmelCase_ : Tuple = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f'blocks.{i}.norm1.weight', f'vit.encoder.layer.{i}.layernorm_before.weight') ) rename_keys.append((f'blocks.{i}.norm1.bias', f'vit.encoder.layer.{i}.layernorm_before.bias') ) rename_keys.append((f'blocks.{i}.attn.proj.weight', f'vit.encoder.layer.{i}.attention.output.dense.weight') ) rename_keys.append((f'blocks.{i}.attn.proj.bias', f'vit.encoder.layer.{i}.attention.output.dense.bias') ) rename_keys.append((f'blocks.{i}.norm2.weight', f'vit.encoder.layer.{i}.layernorm_after.weight') ) rename_keys.append((f'blocks.{i}.norm2.bias', f'vit.encoder.layer.{i}.layernorm_after.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc1.weight', f'vit.encoder.layer.{i}.intermediate.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc1.bias', f'vit.encoder.layer.{i}.intermediate.dense.bias') ) rename_keys.append((f'blocks.{i}.mlp.fc2.weight', f'vit.encoder.layer.{i}.output.dense.weight') ) rename_keys.append((f'blocks.{i}.mlp.fc2.bias', f'vit.encoder.layer.{i}.output.dense.bias') ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """vit.embeddings.cls_token"""), ("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """vit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "vit" from all keys that start with "vit" lowerCAmelCase_ : List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""" ) else pair for pair in rename_keys] else: # layernorm + classification head rename_keys.extend( [ ("""norm.weight""", """vit.layernorm.weight"""), ("""norm.bias""", """vit.layernorm.bias"""), ("""head.weight""", """classifier.weight"""), ("""head.bias""", """classifier.bias"""), ] ) return rename_keys def UpperCamelCase_ ( A__ : Any , A__ : Any , A__ : Tuple=False ): '''simple docstring''' for i in range(config.num_hidden_layers ): if base_model: lowerCAmelCase_ : Optional[Any] = """""" else: lowerCAmelCase_ : Optional[Any] = """vit.""" # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) lowerCAmelCase_ : List[Any] = state_dict.pop(f'blocks.{i}.attn.qkv.weight' ) lowerCAmelCase_ : Union[str, Any] = state_dict.pop(f'blocks.{i}.attn.qkv.bias' ) # next, add query, keys and values (in that order) to the state dict lowerCAmelCase_ : Dict = in_proj_weight[ : config.hidden_size, : ] lowerCAmelCase_ : List[Any] = in_proj_bias[: config.hidden_size] lowerCAmelCase_ : Union[str, Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] lowerCAmelCase_ 
: List[str] = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] lowerCAmelCase_ : Any = in_proj_weight[ -config.hidden_size :, : ] lowerCAmelCase_ : Union[str, Any] = in_proj_bias[-config.hidden_size :] def UpperCamelCase_ ( A__ : str ): '''simple docstring''' lowerCAmelCase_ : Dict = ["""head.weight""", """head.bias"""] for k in ignore_keys: state_dict.pop(A__ , A__ ) def UpperCamelCase_ ( A__ : List[Any] , A__ : Optional[Any] , A__ : Dict ): '''simple docstring''' lowerCAmelCase_ : Tuple = dct.pop(A__ ) lowerCAmelCase_ : Tuple = val def UpperCamelCase_ ( ): '''simple docstring''' lowerCAmelCase_ : str = """http://images.cocodataset.org/val2017/000000039769.jpg""" lowerCAmelCase_ : Optional[int] = Image.open(requests.get(A__ , stream=A__ ).raw ) return im @torch.no_grad() def UpperCamelCase_ ( A__ : Union[str, Any] , A__ : List[Any] ): '''simple docstring''' lowerCAmelCase_ : Optional[Any] = ViTConfig() lowerCAmelCase_ : Any = False # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size if vit_name[-5:] == "in21k": lowerCAmelCase_ : int = True lowerCAmelCase_ : Tuple = int(vit_name[-12:-10] ) lowerCAmelCase_ : Optional[int] = int(vit_name[-9:-6] ) else: lowerCAmelCase_ : Optional[int] = 10_00 lowerCAmelCase_ : Tuple = """huggingface/label-files""" lowerCAmelCase_ : Any = """imagenet-1k-id2label.json""" lowerCAmelCase_ : Dict = json.load(open(hf_hub_download(A__ , A__ , repo_type="""dataset""" ) , """r""" ) ) lowerCAmelCase_ : Union[str, Any] = {int(A__ ): v for k, v in idalabel.items()} lowerCAmelCase_ : Union[str, Any] = idalabel lowerCAmelCase_ : Union[str, Any] = {v: k for k, v in idalabel.items()} lowerCAmelCase_ : Optional[int] = int(vit_name[-6:-4] ) lowerCAmelCase_ : Dict = int(vit_name[-3:] ) # size of the architecture if "deit" in vit_name: if vit_name[9:].startswith("""tiny""" ): lowerCAmelCase_ : int = 1_92 lowerCAmelCase_ : List[str] = 7_68 lowerCAmelCase_ : List[str] = 12 lowerCAmelCase_ : int = 3 elif vit_name[9:].startswith("""small""" ): lowerCAmelCase_ : Optional[Any] = 3_84 lowerCAmelCase_ : Optional[int] = 15_36 lowerCAmelCase_ : Dict = 12 lowerCAmelCase_ : str = 6 else: pass else: if vit_name[4:].startswith("""small""" ): lowerCAmelCase_ : Tuple = 7_68 lowerCAmelCase_ : Any = 23_04 lowerCAmelCase_ : List[str] = 8 lowerCAmelCase_ : List[str] = 8 elif vit_name[4:].startswith("""base""" ): pass elif vit_name[4:].startswith("""large""" ): lowerCAmelCase_ : Dict = 10_24 lowerCAmelCase_ : List[Any] = 40_96 lowerCAmelCase_ : Any = 24 lowerCAmelCase_ : List[str] = 16 elif vit_name[4:].startswith("""huge""" ): lowerCAmelCase_ : Optional[int] = 12_80 lowerCAmelCase_ : Dict = 51_20 lowerCAmelCase_ : Union[str, Any] = 32 lowerCAmelCase_ : Optional[int] = 16 # load original model from timm lowerCAmelCase_ : Union[str, Any] = timm.create_model(A__ , pretrained=A__ ) timm_model.eval() # load state_dict of original model, remove and rename some keys lowerCAmelCase_ : int = timm_model.state_dict() if base_model: remove_classification_head_(A__ ) lowerCAmelCase_ : str = create_rename_keys(A__ , A__ ) for src, dest in rename_keys: rename_key(A__ , A__ , A__ ) read_in_q_k_v(A__ , A__ , A__ ) # load HuggingFace model if vit_name[-5:] == "in21k": lowerCAmelCase_ : int = ViTModel(A__ ).eval() else: lowerCAmelCase_ : Optional[int] = ViTForImageClassification(A__ ).eval() model.load_state_dict(A__ ) # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor if "deit" in vit_name: lowerCAmelCase_ : Any = 
DeiTImageProcessor(size=config.image_size ) else: lowerCAmelCase_ : Any = ViTImageProcessor(size=config.image_size ) lowerCAmelCase_ : Tuple = image_processor(images=prepare_img() , return_tensors="""pt""" ) lowerCAmelCase_ : int = encoding["""pixel_values"""] lowerCAmelCase_ : int = model(A__ ) if base_model: lowerCAmelCase_ : Union[str, Any] = timm_model.forward_features(A__ ) assert timm_pooled_output.shape == outputs.pooler_output.shape assert torch.allclose(A__ , outputs.pooler_output , atol=1E-3 ) else: lowerCAmelCase_ : Union[str, Any] = timm_model(A__ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(A__ , outputs.logits , atol=1E-3 ) Path(A__ ).mkdir(exist_ok=A__ ) print(f'Saving model {vit_name} to {pytorch_dump_folder_path}' ) model.save_pretrained(A__ ) print(f'Saving image processor to {pytorch_dump_folder_path}' ) image_processor.save_pretrained(A__ ) if __name__ == "__main__": __A : Optional[int] = argparse.ArgumentParser() # Required parameters parser.add_argument( "--vit_name", default="vit_base_patch16_224", type=str, help="Name of the ViT timm model you'd like to convert.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory." ) __A : Union[str, Any] = parser.parse_args() convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
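A sketch of the fused-QKV split done in read_in_q_k_v above: timm stores one (3*hidden, hidden) input projection, which is sliced row-wise into separate query/key/value weights. The hidden size below is illustrative.

import torch

hidden_size = 8
in_proj_weight = torch.randn(3 * hidden_size, hidden_size)
in_proj_bias = torch.randn(3 * hidden_size)

q_w = in_proj_weight[:hidden_size, :]
k_w = in_proj_weight[hidden_size : hidden_size * 2, :]
v_w = in_proj_weight[-hidden_size:, :]
q_b = in_proj_bias[:hidden_size]
k_b = in_proj_bias[hidden_size : hidden_size * 2]
v_b = in_proj_bias[-hidden_size:]

assert q_w.shape == k_w.shape == v_w.shape == (hidden_size, hidden_size)
assert torch.equal(torch.cat([q_w, k_w, v_w]), in_proj_weight)
assert torch.equal(torch.cat([q_b, k_b, v_b]), in_proj_bias)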
89
0
import importlib.util import os import platform from argparse import ArgumentParser import huggingface_hub from .. import __version__ as version from ..utils import ( is_accelerate_available, is_flax_available, is_safetensors_available, is_tf_available, is_torch_available, ) from . import BaseTransformersCLICommand def lowerCAmelCase_ ( __A ) -> Any: '''simple docstring''' return EnvironmentCommand() def lowerCAmelCase_ ( __A ) -> Optional[int]: '''simple docstring''' return EnvironmentCommand(args.accelerate_config_file ) class A ( UpperCAmelCase_ ): @staticmethod def lowercase_ (__UpperCAmelCase : ArgumentParser ) -> List[Any]: """simple docstring""" UpperCAmelCase__ = parser.add_parser("env" ) download_parser.set_defaults(func=__UpperCAmelCase ) download_parser.add_argument( "--accelerate-config_file" , default=__UpperCAmelCase , help="The accelerate config file to use for the default values in the launching script." , ) download_parser.set_defaults(func=__UpperCAmelCase ) def __init__(self : Optional[int] , __UpperCAmelCase : str , *__UpperCAmelCase : Tuple ) -> None: """simple docstring""" UpperCAmelCase__ = accelerate_config_file def lowercase_ (self : List[Any] ) -> Union[str, Any]: """simple docstring""" UpperCAmelCase__ = "not installed" if is_safetensors_available(): import safetensors UpperCAmelCase__ = safetensors.__version__ elif importlib.util.find_spec("safetensors" ) is not None: import safetensors UpperCAmelCase__ = f"""{safetensors.__version__} but is ignored because of PyTorch version too old.""" UpperCAmelCase__ = "not installed" UpperCAmelCase__ = UpperCAmelCase__ = "not found" if is_accelerate_available(): import accelerate from accelerate.commands.config import default_config_file, load_config_from_file UpperCAmelCase__ = accelerate.__version__ # Get the default from the config file. 
if self._accelerate_config_file is not None or os.path.isfile(__UpperCAmelCase ): UpperCAmelCase__ = load_config_from_file(self._accelerate_config_file ).to_dict() UpperCAmelCase__ = ( "\n".join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else f"""\t{accelerate_config}""" ) UpperCAmelCase__ = "not installed" UpperCAmelCase__ = "NA" if is_torch_available(): import torch UpperCAmelCase__ = torch.__version__ UpperCAmelCase__ = torch.cuda.is_available() UpperCAmelCase__ = "not installed" UpperCAmelCase__ = "NA" if is_tf_available(): import tensorflow as tf UpperCAmelCase__ = tf.__version__ try: # deprecated in v2.1 UpperCAmelCase__ = tf.test.is_gpu_available() except AttributeError: # returns list of devices, convert to bool UpperCAmelCase__ = bool(tf.config.list_physical_devices("GPU" ) ) UpperCAmelCase__ = "not installed" UpperCAmelCase__ = "not installed" UpperCAmelCase__ = "not installed" UpperCAmelCase__ = "NA" if is_flax_available(): import flax import jax import jaxlib UpperCAmelCase__ = flax.__version__ UpperCAmelCase__ = jax.__version__ UpperCAmelCase__ = jaxlib.__version__ UpperCAmelCase__ = jax.lib.xla_bridge.get_backend().platform UpperCAmelCase__ = { "`transformers` version": version, "Platform": platform.platform(), "Python version": platform.python_version(), "Huggingface_hub version": huggingface_hub.__version__, "Safetensors version": f"""{safetensors_version}""", "Accelerate version": f"""{accelerate_version}""", "Accelerate config": f"""{accelerate_config_str}""", "PyTorch version (GPU?)": f"""{pt_version} ({pt_cuda_available})""", "Tensorflow version (GPU?)": f"""{tf_version} ({tf_cuda_available})""", "Flax version (CPU?/GPU?/TPU?)": f"""{flax_version} ({jax_backend})""", "Jax version": f"""{jax_version}""", "JaxLib version": f"""{jaxlib_version}""", "Using GPU in script?": "<fill in>", "Using distributed or parallel set-up in script?": "<fill in>", } print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" ) print(self.format_dict(__UpperCAmelCase ) ) return info @staticmethod def lowercase_ (__UpperCAmelCase : Dict ) -> Optional[int]: """simple docstring""" return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
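The report printed above is just a bulleted key/value dump; the static helper reduces to a one-liner, sketched here standalone.

def format_dict(d: dict) -> str:
    return "\n".join(f"- {prop}: {val}" for prop, val in d.items()) + "\n"

assert format_dict({"Platform": "Linux"}) == "- Platform: Linux\n"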
65
"""simple docstring""" import gc import unittest import numpy as np import torch from torch.backends.cuda import sdp_kernel from diffusers import ( CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNetaDModel, ) from diffusers.utils import randn_tensor, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS from ..test_pipelines_common import PipelineTesterMixin enable_full_determinism() class _SCREAMING_SNAKE_CASE( A , unittest.TestCase ): SCREAMING_SNAKE_CASE_ : Optional[Any] = ConsistencyModelPipeline SCREAMING_SNAKE_CASE_ : Any = UNCONDITIONAL_IMAGE_GENERATION_PARAMS SCREAMING_SNAKE_CASE_ : Dict = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS # Override required_optional_params to remove num_images_per_prompt SCREAMING_SNAKE_CASE_ : Optional[Any] = frozenset( [ '''num_inference_steps''', '''generator''', '''latents''', '''output_type''', '''return_dict''', '''callback''', '''callback_steps''', ] ) @property def _UpperCamelCase ( self ) -> Tuple: """simple docstring""" __SCREAMING_SNAKE_CASE :Tuple = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' ,subfolder='''test_unet''' ,) return unet @property def _UpperCamelCase ( self ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE :Optional[int] = UNetaDModel.from_pretrained( '''diffusers/consistency-models-test''' ,subfolder='''test_unet_class_cond''' ,) return unet def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__=False ) -> Union[str, Any]: """simple docstring""" if class_cond: __SCREAMING_SNAKE_CASE :str = self.dummy_cond_unet else: __SCREAMING_SNAKE_CASE :Optional[Any] = self.dummy_uncond_unet # Default to CM multistep sampler __SCREAMING_SNAKE_CASE :List[str] = CMStochasticIterativeScheduler( num_train_timesteps=40 ,sigma_min=0.0_0_2 ,sigma_max=8_0.0 ,) __SCREAMING_SNAKE_CASE :List[str] = { '''unet''': unet, '''scheduler''': scheduler, } return components def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=0 ) -> Dict: """simple docstring""" if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ): __SCREAMING_SNAKE_CASE :Tuple = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) else: __SCREAMING_SNAKE_CASE :Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Dict = { '''batch_size''': 1, '''num_inference_steps''': None, '''timesteps''': [22, 0], '''generator''': generator, '''output_type''': '''np''', } return inputs def _UpperCamelCase ( self ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE :Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE :List[str] = self.get_dummy_components() __SCREAMING_SNAKE_CASE :Optional[Any] = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :str = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :int = pipe(**SCREAMING_SNAKE_CASE__ ).images assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE :List[str] = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE :Any = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _UpperCamelCase ( self ) 
-> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE :Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE :List[Any] = self.get_dummy_components(class_cond=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :str = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Optional[int] = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Optional[int] = 0 __SCREAMING_SNAKE_CASE :Optional[int] = pipe(**SCREAMING_SNAKE_CASE__ ).images assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE :Dict = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE :List[Any] = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _UpperCamelCase ( self ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE :List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE :Tuple = self.get_dummy_components() __SCREAMING_SNAKE_CASE :Any = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :List[str] = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :str = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Dict = 1 __SCREAMING_SNAKE_CASE :List[str] = None __SCREAMING_SNAKE_CASE :List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE :List[str] = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE :int = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 def _UpperCamelCase ( self ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE :str = '''cpu''' # ensure determinism for the device-dependent torch.Generator __SCREAMING_SNAKE_CASE :Any = self.get_dummy_components(class_cond=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :str = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Dict = pipe.to(SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :str = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :int = 1 __SCREAMING_SNAKE_CASE :Optional[Any] = None __SCREAMING_SNAKE_CASE :List[Any] = 0 __SCREAMING_SNAKE_CASE :Any = pipe(**SCREAMING_SNAKE_CASE__ ).images assert image.shape == (1, 32, 32, 3) __SCREAMING_SNAKE_CASE :int = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE :Optional[Any] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @slow @require_torch_gpu class _SCREAMING_SNAKE_CASE( unittest.TestCase ): def _UpperCamelCase ( self ) -> List[str]: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__=0 ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__="cpu" ,SCREAMING_SNAKE_CASE__=torch.floataa ,SCREAMING_SNAKE_CASE__=(1, 3, 64, 64) ) -> Dict: """simple docstring""" __SCREAMING_SNAKE_CASE :Tuple = torch.manual_seed(SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Tuple = { '''num_inference_steps''': None, 
'''timesteps''': [22, 0], '''class_labels''': 0, '''generator''': generator, '''output_type''': '''np''', } if get_fixed_latents: __SCREAMING_SNAKE_CASE :int = self.get_fixed_latents(seed=SCREAMING_SNAKE_CASE__ ,device=SCREAMING_SNAKE_CASE__ ,dtype=SCREAMING_SNAKE_CASE__ ,shape=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :int = latents return inputs def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__=0 ,SCREAMING_SNAKE_CASE__="cpu" ,SCREAMING_SNAKE_CASE__=torch.floataa ,SCREAMING_SNAKE_CASE__=(1, 3, 64, 64) ) -> int: """simple docstring""" if type(SCREAMING_SNAKE_CASE__ ) == str: __SCREAMING_SNAKE_CASE :int = torch.device(SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Optional[int] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :int = randn_tensor(SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,device=SCREAMING_SNAKE_CASE__ ,dtype=SCREAMING_SNAKE_CASE__ ) return latents def _UpperCamelCase ( self ) -> Any: """simple docstring""" __SCREAMING_SNAKE_CASE :List[Any] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' ,subfolder='''diffusers_cd_imagenet64_l2''' ) __SCREAMING_SNAKE_CASE :List[str] = CMStochasticIterativeScheduler( num_train_timesteps=40 ,sigma_min=0.0_0_2 ,sigma_max=8_0.0 ,) __SCREAMING_SNAKE_CASE :Dict = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__ ) pipe.to(torch_device=SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :int = self.get_inputs() __SCREAMING_SNAKE_CASE :List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images assert image.shape == (1, 64, 64, 3) __SCREAMING_SNAKE_CASE :Union[str, Any] = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE :Dict = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 def _UpperCamelCase ( self ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE :List[Any] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' ,subfolder='''diffusers_cd_imagenet64_l2''' ) __SCREAMING_SNAKE_CASE :List[Any] = CMStochasticIterativeScheduler( num_train_timesteps=40 ,sigma_min=0.0_0_2 ,sigma_max=8_0.0 ,) __SCREAMING_SNAKE_CASE :Any = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__ ) pipe.to(torch_device=SCREAMING_SNAKE_CASE__ ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Optional[int] = self.get_inputs() __SCREAMING_SNAKE_CASE :int = 1 __SCREAMING_SNAKE_CASE :int = None __SCREAMING_SNAKE_CASE :Union[str, Any] = pipe(**SCREAMING_SNAKE_CASE__ ).images assert image.shape == (1, 64, 64, 3) __SCREAMING_SNAKE_CASE :str = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE :List[str] = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 @require_torch_a def _UpperCamelCase ( self ) -> str: """simple docstring""" __SCREAMING_SNAKE_CASE :List[Any] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' ,subfolder='''diffusers_cd_imagenet64_l2''' ) __SCREAMING_SNAKE_CASE :Any = CMStochasticIterativeScheduler( num_train_timesteps=40 ,sigma_min=0.0_0_2 ,sigma_max=8_0.0 ,) __SCREAMING_SNAKE_CASE :Any = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__ ) pipe.to(torch_device=SCREAMING_SNAKE_CASE__ 
,torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :str = self.get_inputs(get_fixed_latents=SCREAMING_SNAKE_CASE__ ,device=SCREAMING_SNAKE_CASE__ ) # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=SCREAMING_SNAKE_CASE__ ,enable_math=SCREAMING_SNAKE_CASE__ ,enable_mem_efficient=SCREAMING_SNAKE_CASE__ ): __SCREAMING_SNAKE_CASE :Optional[Any] = pipe(**SCREAMING_SNAKE_CASE__ ).images assert image.shape == (1, 64, 64, 3) __SCREAMING_SNAKE_CASE :List[str] = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE :List[Any] = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 @require_torch_a def _UpperCamelCase ( self ) -> List[Any]: """simple docstring""" __SCREAMING_SNAKE_CASE :str = UNetaDModel.from_pretrained('''diffusers/consistency_models''' ,subfolder='''diffusers_cd_imagenet64_l2''' ) __SCREAMING_SNAKE_CASE :Dict = CMStochasticIterativeScheduler( num_train_timesteps=40 ,sigma_min=0.0_0_2 ,sigma_max=8_0.0 ,) __SCREAMING_SNAKE_CASE :int = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__ ) pipe.to(torch_device=SCREAMING_SNAKE_CASE__ ,torch_dtype=torch.floataa ) pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :Dict = self.get_inputs(get_fixed_latents=SCREAMING_SNAKE_CASE__ ,device=SCREAMING_SNAKE_CASE__ ) __SCREAMING_SNAKE_CASE :int = 1 __SCREAMING_SNAKE_CASE :int = None # Ensure usage of flash attention in torch 2.0 with sdp_kernel(enable_flash=SCREAMING_SNAKE_CASE__ ,enable_math=SCREAMING_SNAKE_CASE__ ,enable_mem_efficient=SCREAMING_SNAKE_CASE__ ): __SCREAMING_SNAKE_CASE :str = pipe(**SCREAMING_SNAKE_CASE__ ).images assert image.shape == (1, 64, 64, 3) __SCREAMING_SNAKE_CASE :str = image[0, -3:, -3:, -1] __SCREAMING_SNAKE_CASE :Optional[int] = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
191
0
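The ConsistencyModelPipeline test in the row above seeds a device-dependent torch.Generator, falling back to the global generator on MPS, where device-bound generators are unsupported. A minimal standalone sketch of that seeding pattern (the helper name is illustrative, not part of the row):

import torch

def make_generator(device, seed: int = 0) -> torch.Generator:
    # MPS does not support a device-bound Generator; use the global one instead.
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)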
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _A = { """configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""], """feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""], """processing_wav2vec2""": ["""Wav2Vec2Processor"""], """tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = [ """WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""", """Wav2Vec2ForAudioFrameClassification""", """Wav2Vec2ForCTC""", """Wav2Vec2ForMaskedLM""", """Wav2Vec2ForPreTraining""", """Wav2Vec2ForSequenceClassification""", """Wav2Vec2ForXVector""", """Wav2Vec2Model""", """Wav2Vec2PreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = [ """TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""", """TFWav2Vec2ForCTC""", """TFWav2Vec2Model""", """TFWav2Vec2PreTrainedModel""", """TFWav2Vec2ForSequenceClassification""", ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _A = [ """FlaxWav2Vec2ForCTC""", """FlaxWav2Vec2ForPreTraining""", """FlaxWav2Vec2Model""", """FlaxWav2Vec2PreTrainedModel""", ] if TYPE_CHECKING: from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig from .feature_extraction_wavaveca import WavaVecaFeatureExtractor from .processing_wavaveca import WavaVecaProcessor from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_wavaveca import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaForAudioFrameClassification, WavaVecaForCTC, WavaVecaForMaskedLM, WavaVecaForPreTraining, WavaVecaForSequenceClassification, WavaVecaForXVector, WavaVecaModel, WavaVecaPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, TFWavaVecaForCTC, TFWavaVecaForSequenceClassification, TFWavaVecaModel, TFWavaVecaPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_wavaveca import ( FlaxWavaVecaForCTC, FlaxWavaVecaForPreTraining, FlaxWavaVecaModel, FlaxWavaVecaPreTrainedModel, ) else: import sys _A = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
166
"""simple docstring""" import os from typing import Dict, List, Union import tensorflow as tf from keras_nlp.tokenizers import BytePairTokenizer from tensorflow_text import pad_model_inputs from .tokenization_gpta import GPTaTokenizer class lowerCamelCase ( tf.keras.layers.Layer ): '''simple docstring''' def __init__(self , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase = None , _lowerCamelCase = None ): """simple docstring""" super().__init__() UpperCAmelCase__ : Tuple = pad_token_id UpperCAmelCase__ : Any = max_length UpperCAmelCase__ : str = vocab UpperCAmelCase__ : Union[str, Any] = merges UpperCAmelCase__ : Tuple = BytePairTokenizer(_lowerCamelCase , _lowerCamelCase , sequence_length=_lowerCamelCase ) @classmethod def _a (cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : Any = [""" """.join(_lowerCamelCase ) for m in tokenizer.bpe_ranks.keys()] UpperCAmelCase__ : Tuple = tokenizer.get_vocab() return cls(_lowerCamelCase , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) @classmethod def _a (cls , _lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ): """simple docstring""" UpperCAmelCase__ : Optional[Any] = GPTaTokenizer.from_pretrained(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) return cls.from_tokenizer(_lowerCamelCase , *_lowerCamelCase , **_lowerCamelCase ) @classmethod def _a (cls , _lowerCamelCase ): """simple docstring""" return cls(**_lowerCamelCase ) def _a (self ): """simple docstring""" return { "vocab": self.vocab, "merges": self.merges, "max_length": self.max_length, "pad_token_id": self.pad_token_id, } def _a (self , _lowerCamelCase , _lowerCamelCase = None ): """simple docstring""" UpperCAmelCase__ : List[str] = self.tf_tokenizer(_lowerCamelCase ) UpperCAmelCase__ : Optional[Any] = tf.ones_like(_lowerCamelCase ) if self.pad_token_id is not None: # pad the tokens up to max length UpperCAmelCase__ : Optional[Any] = max_length if max_length is not None else self.max_length if max_length is not None: UpperCAmelCase__ , UpperCAmelCase__ : str = pad_model_inputs( _lowerCamelCase , max_seq_length=_lowerCamelCase , pad_value=self.pad_token_id ) return {"attention_mask": attention_mask, "input_ids": input_ids}
166
1
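Both module files in the row above follow the lazy-import pattern: an _import_structure dict is populated only for backends that are actually installed, then handed to _LazyModule. A stripped-down sketch of the same guard, with hypothetical package and class names:

from typing import TYPE_CHECKING

_import_structure = {"configuration_mymodel": ["MyModelConfig"]}

try:
    import torch  # noqa: F401

    _import_structure["modeling_mymodel"] = ["MyModel"]
except ImportError:
    pass  # the torch backend simply stays out of the import structure

if TYPE_CHECKING:
    from .configuration_mymodel import MyModelConfig  # noqa: F401
else:
    import sys

    from transformers.utils import _LazyModule

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)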
from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase__ = logging.get_logger(__name__) lowerCAmelCase__ = { 'xlm-mlm-en-2048': 'https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json', 'xlm-mlm-ende-1024': 'https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json', 'xlm-mlm-enfr-1024': 'https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json', 'xlm-mlm-enro-1024': 'https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json', 'xlm-mlm-tlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json', 'xlm-mlm-xnli15-1024': 'https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json', 'xlm-clm-enfr-1024': 'https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json', 'xlm-clm-ende-1024': 'https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json', 'xlm-mlm-17-1280': 'https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json', 'xlm-mlm-100-1280': 'https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json', } class lowerCAmelCase__ ( a): '''simple docstring''' __SCREAMING_SNAKE_CASE = "xlm" __SCREAMING_SNAKE_CASE = { "hidden_size": "emb_dim", "num_attention_heads": "n_heads", "num_hidden_layers": "n_layers", "n_words": "vocab_size", # For backward compatibility } def __init__( self , __lowerCamelCase=3_0_1_4_5 , __lowerCamelCase=2_0_4_8 , __lowerCamelCase=1_2 , __lowerCamelCase=1_6 , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=False , __lowerCamelCase=False , __lowerCamelCase=1 , __lowerCamelCase=True , __lowerCamelCase=5_1_2 , __lowerCamelCase=2_0_4_8**-0.5 , __lowerCamelCase=1e-12 , __lowerCamelCase=0.0_2 , __lowerCamelCase=0 , __lowerCamelCase=1 , __lowerCamelCase=2 , __lowerCamelCase=3 , __lowerCamelCase=5 , __lowerCamelCase=True , __lowerCamelCase="first" , __lowerCamelCase=True , __lowerCamelCase=None , __lowerCamelCase=True , __lowerCamelCase=0.1 , __lowerCamelCase=5 , __lowerCamelCase=5 , __lowerCamelCase=0 , __lowerCamelCase=0 , __lowerCamelCase=2 , __lowerCamelCase=0 , **__lowerCamelCase , ) -> Optional[int]: _A : Optional[Any] = vocab_size _A : Optional[Any] = emb_dim _A : Union[str, Any] = n_layers _A : int = n_heads _A : Union[str, Any] = dropout _A : List[str] = attention_dropout _A : Tuple = gelu_activation _A : Dict = sinusoidal_embeddings _A : int = causal _A : int = asm _A : int = n_langs _A : int = use_lang_emb _A : Union[str, Any] = layer_norm_eps _A : Union[str, Any] = bos_index _A : Union[str, Any] = eos_index _A : Tuple = pad_index _A : Any = unk_index _A : Dict = mask_index _A : str = is_encoder _A : Union[str, Any] = max_position_embeddings _A : Optional[int] = embed_init_std _A : List[str] = init_std _A : Optional[int] = summary_type _A : Optional[Any] = summary_use_proj _A : Dict = summary_activation _A : Optional[Any] = summary_proj_to_labels _A : Any = summary_first_dropout _A : str = start_n_top _A : Any = end_n_top _A : int = mask_token_id _A : List[Any] = lang_id if "n_words" in kwargs: _A : Any = kwargs["n_words"] super().__init__(pad_token_id=__lowerCamelCase , bos_token_id=__lowerCamelCase , **__lowerCamelCase) class lowerCAmelCase__ ( a): '''simple docstring''' @property def _lowerCamelCase ( self) -> Mapping[str, Mapping[int, str]]: if self.task == "multiple-choice": _A : List[str] = {0: "batch", 1: "choice", 2: "sequence"} else: _A : List[Any] = {0: "batch", 1: 
"sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis), ])
11
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_torch_available, ) lowerCAmelCase__ = { 'configuration_speecht5': [ 'SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP', 'SpeechT5Config', 'SpeechT5HifiGanConfig', ], 'feature_extraction_speecht5': ['SpeechT5FeatureExtractor'], 'processing_speecht5': ['SpeechT5Processor'], } try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = ['SpeechT5Tokenizer'] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase__ = [ 'SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST', 'SpeechT5ForSpeechToText', 'SpeechT5ForSpeechToSpeech', 'SpeechT5ForTextToSpeech', 'SpeechT5Model', 'SpeechT5PreTrainedModel', 'SpeechT5HifiGan', ] if TYPE_CHECKING: from .configuration_speechta import ( SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP, SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP, SpeechTaConfig, SpeechTaHifiGanConfig, ) from .feature_extraction_speechta import SpeechTaFeatureExtractor from .processing_speechta import SpeechTaProcessor try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_speechta import SpeechTaTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_speechta import ( SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST, SpeechTaForSpeechToSpeech, SpeechTaForSpeechToText, SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaModel, SpeechTaPreTrainedModel, ) else: import sys lowerCAmelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
11
1
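The XLM OnnxConfig in the row above exposes per-input dynamic axes ({0: "batch", 1: "sequence"}). Roughly how such a mapping is consumed at export time, sketched with a tiny randomly initialized model; token_type_ids is omitted for brevity, and the whole snippet is an assumption, since the library's real export path does more:

import torch
from transformers import XLMConfig, XLMModel

dynamic_axes = {
    "input_ids": {0: "batch", 1: "sequence"},
    "attention_mask": {0: "batch", 1: "sequence"},
}
model = XLMModel(XLMConfig(vocab_size=100, emb_dim=32, n_layers=1, n_heads=2)).eval()
ids = torch.ones(2, 8, dtype=torch.long)
torch.onnx.export(
    model,
    (ids, ids),  # input_ids, attention_mask
    "xlm.onnx",
    input_names=["input_ids", "attention_mask"],
    output_names=["last_hidden_state"],
    dynamic_axes=dynamic_axes,
)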
import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def lowerCAmelCase_ ( _snake_case : List[str] ) -> Tuple: '''simple docstring''' __magic_name__ : Dict = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(_snake_case , _snake_case ) def lowerCAmelCase_ ( _snake_case : Optional[int] ) -> Union[str, Any]: '''simple docstring''' __magic_name__ : Optional[Any] = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: __magic_name__ : Optional[Any] = s_dict.pop(_snake_case ) elif "subsample" in key: __magic_name__ : str = s_dict.pop(_snake_case ) def lowerCAmelCase_ ( _snake_case : List[Any] ) -> Tuple: '''simple docstring''' __magic_name__ : Dict = emb.weight.shape __magic_name__ : List[Any] = nn.Linear(_snake_case , _snake_case , bias=_snake_case ) __magic_name__ : str = emb.weight.data return lin_layer def lowerCAmelCase_ ( _snake_case : List[Any] , _snake_case : Dict ) -> int: '''simple docstring''' __magic_name__ : Tuple = torch.load(_snake_case , map_location="cpu" ) __magic_name__ : str = mam_aaa["args"] __magic_name__ : Union[str, Any] = mam_aaa["model"] __magic_name__ : List[str] = state_dict["decoder.output_projection.weight"] remove_ignore_keys_(_snake_case ) rename_keys(_snake_case ) __magic_name__ : Optional[Any] = state_dict["decoder.embed_tokens.weight"].shape[0] __magic_name__ : int = args.share_decoder_input_output_embed __magic_name__ : List[str] = [int(_snake_case ) for i in args.conv_kernel_sizes.split("," )] __magic_name__ : Dict = SpeechaTextConfig( vocab_size=_snake_case , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(_snake_case ) , conv_channels=args.conv_channels , conv_kernel_sizes=_snake_case , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=_snake_case , num_beams=5 , max_length=200 , use_cache=_snake_case , decoder_start_token_id=2 , early_stopping=_snake_case , ) __magic_name__ : str = SpeechaTextForConditionalGeneration(_snake_case ) __magic_name__ : Any = model.model.load_state_dict(_snake_case , strict=_snake_case ) if len(_snake_case ) > 0 and not set(_snake_case ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing," F''' but all the following weights are missing {missing}''' ) if tie_embeds: __magic_name__ : str = make_linear_from_emb(model.model.decoder.embed_tokens ) else: __magic_name__ : Optional[Any] = lm_head_weights model.save_pretrained(_snake_case ) if __name__ == "__main__": snake_case : Any = argparse.ArgumentParser() # Required parameters parser.add_argument("--fairseq_path", type=str, help="Path to the fairseq model (.pt) 
file.") parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") snake_case : Union[str, Any] = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
358
from __future__ import annotations

graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}


class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue
        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        if target_vertex == self.source_vertex:
            return self.source_vertex
        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            msg = f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            raise ValueError(msg)
        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
41
0
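The fairseq conversion script in the row above ties the LM head to the decoder embedding matrix via make_linear_from_emb. A cleaned-up, self-contained sketch of that tying (the shapes are illustrative):

import torch
from torch import nn

def make_linear_from_emb(emb: nn.Embedding) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(emb_size, vocab_size, bias=False)
    lin_layer.weight.data = emb.weight.data  # share the same tensor -> tied weights
    return lin_layer

emb = nn.Embedding(1000, 256)
head = make_linear_from_emb(emb)
assert head.weight.data_ptr() == emb.weight.data_ptr()  # same storage
assert head(torch.randn(1, 256)).shape == (1, 1000)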
import argparse import logging import os from datetime import datetime import numpy as np import torch from torch import nn from torch.utils.data import DataLoader, RandomSampler, TensorDataset from tqdm import tqdm from transformers import GPTaLMHeadModel lowerCAmelCase_ = logging.getLogger(__name__) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> Any: """simple docstring""" if os.path.exists(_UpperCamelCase ): if os.path.exists(os.path.join(_UpperCamelCase , '''config.json''' ) ) and os.path.isfile( os.path.join(_UpperCamelCase , '''config.json''' ) ): os.remove(os.path.join(_UpperCamelCase , '''config.json''' ) ) if os.path.exists(os.path.join(_UpperCamelCase , '''pytorch_model.bin''' ) ) and os.path.isfile( os.path.join(_UpperCamelCase , '''pytorch_model.bin''' ) ): os.remove(os.path.join(_UpperCamelCase , '''pytorch_model.bin''' ) ) else: os.makedirs(_UpperCamelCase ) model.save_pretrained(_UpperCamelCase ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase=False ) -> str: """simple docstring""" snake_case_ : List[Any] = 2 if unlogit: snake_case_ : List[str] = torch.pow(_UpperCamelCase , _UpperCamelCase ) snake_case_ : Optional[Any] = p * torch.log(_UpperCamelCase ) snake_case_ : List[Any] = 0 return -plogp.sum(dim=-1 ) def lowerCamelCase_ ( _UpperCamelCase ) -> int: """simple docstring""" logger.info('''lv, h >\t''' + '''\t'''.join(f'''{x + 1}''' for x in range(len(_UpperCamelCase ) ) ) ) for row in range(len(_UpperCamelCase ) ): if tensor.dtype != torch.long: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:.5f}''' for x in tensor[row].cpu().data ) ) else: logger.info(f'''layer {row + 1}:\t''' + '''\t'''.join(f'''{x:d}''' for x in tensor[row].cpu().data ) ) def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=True , _UpperCamelCase=True , _UpperCamelCase=None , _UpperCamelCase=False ) -> Tuple: """simple docstring""" snake_case_ , snake_case_ : Optional[int] = model.config.num_hidden_layers, model.config.num_attention_heads snake_case_ : Optional[int] = torch.zeros(_UpperCamelCase , _UpperCamelCase ).to(args.device ) snake_case_ : Tuple = torch.zeros(_UpperCamelCase , _UpperCamelCase ).to(args.device ) if head_mask is None: snake_case_ : Optional[Any] = torch.ones(_UpperCamelCase , _UpperCamelCase ).to(args.device ) head_mask.requires_grad_(requires_grad=_UpperCamelCase ) # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch if actually_pruned: snake_case_ : Optional[int] = None snake_case_ : Dict = 0.0 snake_case_ : Tuple = 0.0 for step, inputs in enumerate(tqdm(_UpperCamelCase , desc='''Iteration''' , disable=args.local_rank not in [-1, 0] ) ): snake_case_ : List[str] = tuple(t.to(args.device ) for t in inputs ) ((snake_case_) , ) : Optional[Any] = inputs # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below) snake_case_ : List[str] = model(_UpperCamelCase , labels=_UpperCamelCase , head_mask=_UpperCamelCase ) # (loss), lm_logits, presents, (all hidden_states), (attentions) snake_case_ , snake_case_ , snake_case_ : str = ( outputs[0], outputs[1], outputs[-1], ) # Loss and logits are the first, attention the last loss.backward() # Backpropagate to populate the gradients in the head mask total_loss += loss.detach().cpu().numpy() if compute_entropy: for layer, attn in enumerate(_UpperCamelCase ): snake_case_ : List[Any] = entropy(attn.detach() , _UpperCamelCase ) attn_entropy[layer] += masked_entropy.sum(-1 ).sum(0 ).sum(0 ).detach() 
if compute_importance: head_importance += head_mask.grad.abs().detach() tot_tokens += torch.ones_like(_UpperCamelCase ).float().detach().sum().data # Normalize attn_entropy /= tot_tokens head_importance /= tot_tokens # Layerwise importance normalization if not args.dont_normalize_importance_by_layer: snake_case_ : Optional[int] = 2 snake_case_ : List[Any] = torch.pow(torch.pow(_UpperCamelCase , _UpperCamelCase ).sum(-1 ) , 1 / exponent ) head_importance /= norm_by_layer.unsqueeze(-1 ) + 1E-20 if not args.dont_normalize_global_importance: snake_case_ : Dict = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min()) # Print matrices if compute_entropy: logger.info('''Attention entropies''' ) print_ad_tensor(_UpperCamelCase ) if compute_importance: logger.info('''Head importance scores''' ) print_ad_tensor(_UpperCamelCase ) logger.info('''Head ranked by importance scores''' ) snake_case_ : List[str] = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device ) snake_case_ : Union[str, Any] = torch.arange( head_importance.numel() , device=args.device ) snake_case_ : Tuple = head_ranks.view_as(_UpperCamelCase ) print_ad_tensor(_UpperCamelCase ) return attn_entropy, head_importance, total_loss def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> List[str]: """simple docstring""" snake_case_ , snake_case_ , snake_case_ : Dict = compute_heads_importance(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , compute_entropy=_UpperCamelCase ) snake_case_ : List[str] = 1 / loss # instead of downsteam score use the LM loss logger.info('''Pruning: original score: %f, threshold: %f''' , _UpperCamelCase , original_score * args.masking_threshold ) snake_case_ : Any = torch.ones_like(_UpperCamelCase ) snake_case_ : str = max(1 , int(new_head_mask.numel() * args.masking_amount ) ) snake_case_ : Any = original_score while current_score >= original_score * args.masking_threshold: snake_case_ : List[str] = new_head_mask.clone().detach() # save current head mask # heads from least important to most - keep only not-masked heads snake_case_ : Union[str, Any] = float('''Inf''' ) snake_case_ : Optional[int] = head_importance.view(-1 ).sort()[1] if len(_UpperCamelCase ) <= num_to_mask: print('''BREAK BY num_to_mask''' ) break # mask heads snake_case_ : Optional[Any] = current_heads_to_mask[:num_to_mask] logger.info('''Heads to mask: %s''' , str(current_heads_to_mask.tolist() ) ) snake_case_ : str = new_head_mask.view(-1 ) snake_case_ : Dict = 0.0 snake_case_ : str = new_head_mask.view_as(_UpperCamelCase ) snake_case_ : Any = new_head_mask.clone().detach() print_ad_tensor(_UpperCamelCase ) # Compute metric and head importance again snake_case_ , snake_case_ , snake_case_ : List[Any] = compute_heads_importance( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , compute_entropy=_UpperCamelCase , head_mask=_UpperCamelCase ) snake_case_ : Dict = 1 / loss logger.info( '''Masking: current score: %f, remaining heads %d (%.1f percents)''' , _UpperCamelCase , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , ) logger.info('''Final head mask''' ) print_ad_tensor(_UpperCamelCase ) np.save(os.path.join(args.output_dir , '''head_mask.npy''' ) , head_mask.detach().cpu().numpy() ) return head_mask def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) -> Optional[int]: """simple docstring""" snake_case_ : Any = datetime.now() snake_case_ , snake_case_ , snake_case_ : str = 
compute_heads_importance( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , compute_entropy=_UpperCamelCase , compute_importance=_UpperCamelCase , head_mask=_UpperCamelCase ) snake_case_ : Optional[Any] = 1 / loss snake_case_ : int = datetime.now() - before_time snake_case_ : List[Any] = sum(p.numel() for p in model.parameters() ) snake_case_ : List[str] = { layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(_UpperCamelCase ) ) } for k, v in heads_to_prune.items(): if isinstance(_UpperCamelCase , _UpperCamelCase ): snake_case_ : Dict = [ v, ] assert sum(len(_UpperCamelCase ) for h in heads_to_prune.values() ) == (1 - head_mask.long()).sum().item() model.prune_heads(_UpperCamelCase ) snake_case_ : Dict = sum(p.numel() for p in model.parameters() ) snake_case_ : str = datetime.now() snake_case_ , snake_case_ , snake_case_ : Tuple = compute_heads_importance( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , compute_entropy=_UpperCamelCase , compute_importance=_UpperCamelCase , head_mask=_UpperCamelCase , actually_pruned=_UpperCamelCase , ) snake_case_ : List[Any] = 1 / loss snake_case_ : Dict = datetime.now() - before_time logger.info( '''Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)''' , _UpperCamelCase , _UpperCamelCase , pruned_num_params / original_num_params * 100 , ) logger.info('''Pruning: score with masking: %f score with pruning: %f''' , _UpperCamelCase , _UpperCamelCase ) logger.info('''Pruning: speed ratio (original timing / new timing): %f percents''' , original_time / new_time * 100 ) save_model(_UpperCamelCase , args.output_dir ) def lowerCamelCase_ ( ) -> Optional[Any]: """simple docstring""" snake_case_ : str = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--data_dir''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''The input data dir. 
Should contain the .tsv files (or other data files) for the task.''' , ) parser.add_argument( '''--model_name_or_path''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''Path to pretrained model or model identifier from huggingface.co/models''' , ) parser.add_argument( '''--output_dir''' , default=_UpperCamelCase , type=_UpperCamelCase , required=_UpperCamelCase , help='''The output directory where the model predictions and checkpoints will be written.''' , ) # Other parameters parser.add_argument( '''--config_name''' , default='''''' , type=_UpperCamelCase , help='''Pretrained config name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--tokenizer_name''' , default='''''' , type=_UpperCamelCase , help='''Pretrained tokenizer name or path if not the same as model_name_or_path''' , ) parser.add_argument( '''--cache_dir''' , default=_UpperCamelCase , type=_UpperCamelCase , help='''Where do you want to store the pre-trained models downloaded from s3''' , ) parser.add_argument( '''--data_subset''' , type=_UpperCamelCase , default=-1 , help='''If > 0: limit the data to a subset of data_subset instances.''' ) parser.add_argument( '''--overwrite_output_dir''' , action='''store_true''' , help='''Whether to overwrite data in output directory''' ) parser.add_argument( '''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' ) parser.add_argument( '''--dont_normalize_importance_by_layer''' , action='''store_true''' , help='''Don\'t normalize importance score by layers''' ) parser.add_argument( '''--dont_normalize_global_importance''' , action='''store_true''' , help='''Don\'t normalize all importance scores between 0 and 1''' , ) parser.add_argument( '''--try_masking''' , action='''store_true''' , help='''Whether to try to mask head until a threshold of accuracy.''' ) parser.add_argument( '''--masking_threshold''' , default=0.9 , type=_UpperCamelCase , help='''masking threshold in term of metrics (stop masking when metric < threshold * original metric value).''' , ) parser.add_argument( '''--masking_amount''' , default=0.1 , type=_UpperCamelCase , help='''Amount to heads to masking at each masking step.''' ) parser.add_argument('''--metric_name''' , default='''acc''' , type=_UpperCamelCase , help='''Metric to use for head masking.''' ) parser.add_argument( '''--max_seq_length''' , default=128 , type=_UpperCamelCase , help=( '''The maximum total input sequence length after WordPiece tokenization. 
\n''' '''Sequences longer than this will be truncated, sequences shorter padded.''' ) , ) parser.add_argument('''--batch_size''' , default=1 , type=_UpperCamelCase , help='''Batch size.''' ) parser.add_argument('''--seed''' , type=_UpperCamelCase , default=42 ) parser.add_argument('''--local_rank''' , type=_UpperCamelCase , default=-1 , help='''local_rank for distributed training on gpus''' ) parser.add_argument('''--no_cuda''' , action='''store_true''' , help='''Whether not to use CUDA when available''' ) parser.add_argument('''--server_ip''' , type=_UpperCamelCase , default='''''' , help='''Can be used for distant debugging.''' ) parser.add_argument('''--server_port''' , type=_UpperCamelCase , default='''''' , help='''Can be used for distant debugging.''' ) snake_case_ : Optional[int] = parser.parse_args() if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('''Waiting for debugger attach''' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=_UpperCamelCase ) ptvsd.wait_for_attach() # Setup devices and distributed training if args.local_rank == -1 or args.no_cuda: snake_case_ : Optional[int] = torch.device('''cuda''' if torch.cuda.is_available() and not args.no_cuda else '''cpu''' ) snake_case_ : List[str] = 0 if args.no_cuda else torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank ) snake_case_ : Union[str, Any] = torch.device('''cuda''' , args.local_rank ) snake_case_ : Tuple = 1 torch.distributed.init_process_group(backend='''nccl''' ) # Initializes the distributed backend # Setup logging logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN ) logger.info('''device: {} n_gpu: {}, distributed: {}'''.format(args.device , args.n_gpu , bool(args.local_rank != -1 ) ) ) snake_case_ : Optional[int] = GPTaLMHeadModel.from_pretrained(args.model_name_or_path ) # Distributed and parallel training model.to(args.device ) if args.local_rank != -1: snake_case_ : int = nn.parallel.DistributedDataParallel( _UpperCamelCase , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=_UpperCamelCase ) elif args.n_gpu > 1: snake_case_ : str = nn.DataParallel(_UpperCamelCase ) # Print/save training arguments os.makedirs(args.output_dir , exist_ok=_UpperCamelCase ) torch.save(_UpperCamelCase , os.path.join(args.output_dir , '''run_args.bin''' ) ) logger.info('''Training/evaluation parameters %s''' , _UpperCamelCase ) # Prepare dataset snake_case_ : Any = np.concatenate( [ np.loadtxt(args.data_dir , dtype=np.intaa ), ] ) snake_case_ : str = (torch.from_numpy(_UpperCamelCase ),) snake_case_ : Union[str, Any] = TensorDataset(*_UpperCamelCase ) snake_case_ : Optional[Any] = RandomSampler(_UpperCamelCase ) snake_case_ : Any = DataLoader(_UpperCamelCase , sampler=_UpperCamelCase , batch_size=args.batch_size ) # Compute head entropy and importance score compute_heads_importance(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Try head masking (set heads to zero until the score goes under a threshole) # and head pruning (remove masked heads and see the effect on the network) if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0: snake_case_ : Any = mask_heads(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) prune_heads(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) if __name__ == "__main__": main()
279
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)

    # Example Input
    # Enter number of vertices: 3
    # Enter number of edges: 2
    # # generated graph from vertex and edge inputs
    # [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
    # [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
    # specify source, destination and weight for edge #1
    # Edge 1
    # Enter source:1
    # Enter destination:2
    # Enter weight:2
    # specify source, destination and weight for edge #2
    # Edge 2
    # Enter source:2
    # Enter destination:1
    # Enter weight:1
    # # Expected Output from the vertice, edge and src, dst, weight inputs!!
    # 0 INF INF
    # INF 0 2
    # INF 1 0
279
1
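De-obfuscated, the entropy helper in the head-pruning script above computes H(p) = -sum_i p_i * log(p_i) over the last axis, treating 0 * log(0) as 0. A sketch mirroring the row's logic:

import torch

def entropy(p: torch.Tensor, unlogit: bool = False) -> torch.Tensor:
    if unlogit:
        p = torch.pow(p, 2)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0.0  # restore 0 * log(0) = 0 (otherwise 0 * -inf gives nan)
    return -plogp.sum(dim=-1)

print(entropy(torch.tensor([[0.5, 0.5, 0.0]])))  # tensor([0.6931]) == ln 2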
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ..auto import CONFIG_MAPPING _A = logging.get_logger(__name__) _A = { """microsoft/table-transformer-detection""": ( """https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json""" ), } class _lowerCamelCase ( _snake_case ): _lowerCamelCase :Any = "table-transformer" _lowerCamelCase :List[str] = ["past_key_values"] _lowerCamelCase :Any = { "hidden_size": "d_model", "num_attention_heads": "encoder_attention_heads", } def __init__( self : List[Any] , UpperCamelCase : Optional[int]=True , UpperCamelCase : List[str]=None , UpperCamelCase : Dict=3 , UpperCamelCase : Dict=1_00 , UpperCamelCase : Optional[int]=6 , UpperCamelCase : List[str]=20_48 , UpperCamelCase : Optional[Any]=8 , UpperCamelCase : Any=6 , UpperCamelCase : Dict=20_48 , UpperCamelCase : List[Any]=8 , UpperCamelCase : int=0.0 , UpperCamelCase : List[Any]=0.0 , UpperCamelCase : str=True , UpperCamelCase : Tuple="relu" , UpperCamelCase : Optional[Any]=2_56 , UpperCamelCase : Any=0.1 , UpperCamelCase : Optional[int]=0.0 , UpperCamelCase : Union[str, Any]=0.0 , UpperCamelCase : Dict=0.02 , UpperCamelCase : Dict=1.0 , UpperCamelCase : Optional[int]=False , UpperCamelCase : Optional[int]="sine" , UpperCamelCase : int="resnet50" , UpperCamelCase : Optional[int]=True , UpperCamelCase : Optional[int]=False , UpperCamelCase : Optional[int]=1 , UpperCamelCase : Dict=5 , UpperCamelCase : Dict=2 , UpperCamelCase : List[str]=1 , UpperCamelCase : Union[str, Any]=1 , UpperCamelCase : Optional[Any]=5 , UpperCamelCase : Tuple=2 , UpperCamelCase : List[str]=0.1 , **UpperCamelCase : Union[str, Any] , ) -> str: """simple docstring""" if backbone_config is not None and use_timm_backbone: raise ValueError("""You can't specify both `backbone_config` and `use_timm_backbone`.""" ) if not use_timm_backbone: if backbone_config is None: logger.info("""`backbone_config` is `None`. 
Initializing the config with the default `ResNet` backbone.""" ) lowerCAmelCase__ : Tuple = CONFIG_MAPPING["""resnet"""](out_features=["""stage4"""] ) elif isinstance(UpperCamelCase__ , UpperCamelCase__ ): lowerCAmelCase__ : str = backbone_config.get("""model_type""" ) lowerCAmelCase__ : str = CONFIG_MAPPING[backbone_model_type] lowerCAmelCase__ : str = config_class.from_dict(UpperCamelCase__ ) # set timm attributes to None lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = None, None, None lowerCAmelCase__ : str = use_timm_backbone lowerCAmelCase__ : Dict = backbone_config lowerCAmelCase__ : Union[str, Any] = num_channels lowerCAmelCase__ : int = num_queries lowerCAmelCase__ : str = d_model lowerCAmelCase__ : Any = encoder_ffn_dim lowerCAmelCase__ : Any = encoder_layers lowerCAmelCase__ : Union[str, Any] = encoder_attention_heads lowerCAmelCase__ : List[str] = decoder_ffn_dim lowerCAmelCase__ : int = decoder_layers lowerCAmelCase__ : int = decoder_attention_heads lowerCAmelCase__ : int = dropout lowerCAmelCase__ : Any = attention_dropout lowerCAmelCase__ : Union[str, Any] = activation_dropout lowerCAmelCase__ : Union[str, Any] = activation_function lowerCAmelCase__ : List[str] = init_std lowerCAmelCase__ : List[str] = init_xavier_std lowerCAmelCase__ : Optional[Any] = encoder_layerdrop lowerCAmelCase__ : int = decoder_layerdrop lowerCAmelCase__ : Tuple = encoder_layers lowerCAmelCase__ : str = auxiliary_loss lowerCAmelCase__ : Optional[int] = position_embedding_type lowerCAmelCase__ : str = backbone lowerCAmelCase__ : int = use_pretrained_backbone lowerCAmelCase__ : Union[str, Any] = dilation # Hungarian matcher lowerCAmelCase__ : List[Any] = class_cost lowerCAmelCase__ : Optional[int] = bbox_cost lowerCAmelCase__ : List[Any] = giou_cost # Loss coefficients lowerCAmelCase__ : str = mask_loss_coefficient lowerCAmelCase__ : int = dice_loss_coefficient lowerCAmelCase__ : Any = bbox_loss_coefficient lowerCAmelCase__ : int = giou_loss_coefficient lowerCAmelCase__ : Tuple = eos_coefficient super().__init__(is_encoder_decoder=UpperCamelCase__ , **UpperCamelCase__ ) @property def _lowerCAmelCase ( self : Tuple ) -> int: """simple docstring""" return self.encoder_attention_heads @property def _lowerCAmelCase ( self : Union[str, Any] ) -> int: """simple docstring""" return self.d_model class _lowerCamelCase ( _snake_case ): _lowerCamelCase :Optional[int] = version.parse("1.11" ) @property def _lowerCAmelCase ( self : Union[str, Any] ) -> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ("""pixel_mask""", {0: """batch"""}), ] ) @property def _lowerCAmelCase ( self : int ) -> float: """simple docstring""" return 1E-5 @property def _lowerCAmelCase ( self : Optional[Any] ) -> int: """simple docstring""" return 12
363
"""simple docstring""" from __future__ import annotations import math from collections import Counter from string import ascii_lowercase def lowercase_ ( __UpperCAmelCase ) -> None: lowerCAmelCase__ , lowerCAmelCase__ : int = analyze_text(__UpperCAmelCase ) lowerCAmelCase__ : Optional[Any] = list(""" """ + ascii_lowercase ) # what is our total sum of probabilities. lowerCAmelCase__ : List[str] = sum(single_char_strings.values() ) # one length string lowerCAmelCase__ : List[str] = 0 # for each alpha we go in our dict and if it is in it we calculate entropy for ch in my_alphas: if ch in single_char_strings: lowerCAmelCase__ : List[Any] = single_char_strings[ch] lowerCAmelCase__ : int = my_str / all_sum my_fir_sum += prob * math.loga(__UpperCAmelCase ) # entropy formula. # print entropy print(f"""{round(-1 * my_fir_sum ):.1f}""" ) # two len string lowerCAmelCase__ : Tuple = sum(two_char_strings.values() ) lowerCAmelCase__ : str = 0 # for each alpha (two in size) calculate entropy. for cha in my_alphas: for cha in my_alphas: lowerCAmelCase__ : Optional[int] = cha + cha if sequence in two_char_strings: lowerCAmelCase__ : int = two_char_strings[sequence] lowerCAmelCase__ : str = int(__UpperCAmelCase ) / all_sum my_sec_sum += prob * math.loga(__UpperCAmelCase ) # print second entropy print(f"""{round(-1 * my_sec_sum ):.1f}""" ) # print the difference between them print(f"""{round((-1 * my_sec_sum) - (-1 * my_fir_sum) ):.1f}""" ) def lowercase_ ( __UpperCAmelCase ) -> tuple[dict, dict]: lowerCAmelCase__ : Any = Counter() # type: ignore lowerCAmelCase__ : Tuple = Counter() # type: ignore single_char_strings[text[-1]] += 1 # first case when we have space at start. two_char_strings[" " + text[0]] += 1 for i in range(0 , len(__UpperCAmelCase ) - 1 ): single_char_strings[text[i]] += 1 two_char_strings[text[i : i + 2]] += 1 return single_char_strings, two_char_strings def lowercase_ ( ) -> Any: import doctest doctest.testmod() # text = ( # "Had repulsive dashwoods suspicion sincerity but advantage now him. Remark " # "easily garret nor nay. Civil those mrs enjoy shy fat merry. You greatest " # "jointure saw horrible. He private he on be imagine suppose. Fertile " # "beloved evident through no service elderly is. Blind there if every no so " # "at. Own neglected you preferred way sincerity delivered his attempted. To " # "of message cottage windows do besides against uncivil. Delightful " # "unreserved impossible few estimating men favourable see entreaties. She " # "propriety immediate was improving. He or entrance humoured likewise " # "moderate. Much nor game son say feel. Fat make met can must form into " # "gate. Me we offending prevailed discovery. " # ) # calculate_prob(text) if __name__ == "__main__": main()
212
0
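As a worked instance of the first-order formula used above, H = -sum_c p(c) * log2 p(c):

import math
from collections import Counter

def first_order_entropy(text: str) -> float:
    counts = Counter(text)
    total = sum(counts.values())
    return -sum(n / total * math.log2(n / total) for n in counts.values())

# "aab": p(a) = 2/3, p(b) = 1/3 -> H ~= 0.918 bits per character
print(f"{first_order_entropy('aab'):.3f}")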
class EditDistance:
    """
    Minimum edit distance between two strings, solved top-down with
    memoization and bottom-up with tabulation.

    Use:
    solver = EditDistance()
    distance = solver.min_dist_top_down(word1, word2)
    """

    def __init__(self) -> None:
        self.word1 = ""
        self.word2 = ""
        self.dp: list[list[int]] = []

    def __min_dist_top_down_dp(self, m: int, n: int) -> int:
        if m == -1:
            return n + 1
        elif n == -1:
            return m + 1
        elif self.dp[m][n] > -1:
            return self.dp[m][n]
        else:
            if self.word1[m] == self.word2[n]:
                self.dp[m][n] = self.__min_dist_top_down_dp(m - 1, n - 1)
            else:
                insert = self.__min_dist_top_down_dp(m, n - 1)
                delete = self.__min_dist_top_down_dp(m - 1, n)
                replace = self.__min_dist_top_down_dp(m - 1, n - 1)
                self.dp[m][n] = 1 + min(insert, delete, replace)
            return self.dp[m][n]

    def min_dist_top_down(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        self.dp = [[-1 for _ in range(len(word2))] for _ in range(len(word1))]
        return self.__min_dist_top_down_dp(len(word1) - 1, len(word2) - 1)

    def min_dist_bottom_up(self, word1: str, word2: str) -> int:
        self.word1 = word1
        self.word2 = word2
        m = len(word1)
        n = len(word2)
        self.dp = [[0 for _ in range(n + 1)] for _ in range(m + 1)]
        for i in range(m + 1):
            for j in range(n + 1):
                if i == 0:  # first string is empty
                    self.dp[i][j] = j
                elif j == 0:  # second string is empty
                    self.dp[i][j] = i
                elif word1[i - 1] == word2[j - 1]:  # last characters are equal
                    self.dp[i][j] = self.dp[i - 1][j - 1]
                else:
                    insert = self.dp[i][j - 1]
                    delete = self.dp[i - 1][j]
                    replace = self.dp[i - 1][j - 1]
                    self.dp[i][j] = 1 + min(insert, delete, replace)
        return self.dp[m][n]


if __name__ == "__main__":
    solver = EditDistance()

    print("****************** Testing Edit Distance DP Algorithm ******************")
    print()

    S1 = input("Enter the first string: ").strip()
    S2 = input("Enter the second string: ").strip()

    print()
    print(f"The minimum edit distance is: {solver.min_dist_top_down(S1, S2)}")
    print(f"The minimum edit distance is: {solver.min_dist_bottom_up(S1, S2)}")
    print()
    print("*************** End of Testing Edit Distance DP Algorithm ***************")
85
'''simple docstring''' import json from typing import TYPE_CHECKING, List, Optional, Tuple from tokenizers import pre_tokenizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging if TYPE_CHECKING: from transformers.pipelines.conversational import Conversation _SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : int = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"} _SCREAMING_SNAKE_CASE : Union[str, Any] = { "tokenizer_file": { "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json", }, } _SCREAMING_SNAKE_CASE : int = { "gpt-neox-20b": 2048, } class _snake_case ( lowercase_ ): lowerCAmelCase_ : str = VOCAB_FILES_NAMES lowerCAmelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP lowerCAmelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCAmelCase_ : str = ["input_ids", "attention_mask"] def __init__( self , a__=None , a__=None , a__=None , a__="<|endoftext|>" , a__="<|endoftext|>" , a__="<|endoftext|>" , a__=False , **a__ , ) -> Tuple: '''simple docstring''' super().__init__( a__ , a__ , tokenizer_file=a__ , unk_token=a__ , bos_token=a__ , eos_token=a__ , add_prefix_space=a__ , **a__ , ) snake_case_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() ) if pre_tok_state.get("add_prefix_space" , a__ ) != add_prefix_space: snake_case_ = getattr(a__ , pre_tok_state.pop("type" ) ) snake_case_ = add_prefix_space snake_case_ = pre_tok_class(**a__ ) snake_case_ = add_prefix_space def lowerCAmelCase__ ( self , a__ , a__ = None ) -> Tuple[str]: '''simple docstring''' snake_case_ = self._tokenizer.model.save(a__ , name=a__ ) return tuple(a__ ) def lowerCAmelCase__ ( self , a__ ) -> List[int]: '''simple docstring''' snake_case_ = [] for is_user, text in conversation.iter_texts(): input_ids.extend(self.encode(a__ , add_special_tokens=a__ ) + [self.eos_token_id] ) if len(a__ ) > self.model_max_length: snake_case_ = input_ids[-self.model_max_length :] return input_ids
85
1
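A quick usage check for the EditDistance class earlier in this row (assuming the repaired method names); "intention" -> "execution" is the classic five-edit example:

solver = EditDistance()
assert solver.min_dist_top_down("intention", "execution") == 5
assert solver.min_dist_bottom_up("intention", "execution") == 5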
from __future__ import annotations

from copy import deepcopy


class FenwickTree:
    """Binary indexed tree over a 0-indexed array; index 0 is stored
    separately in tree[0]."""

    def __init__(self, arr: list[int] | None = None, size: int | None = None) -> None:
        if arr is None and size is not None:
            self.size = size
            self.tree = [0] * size
        elif arr is not None:
            self.init(arr)
        else:
            raise ValueError("Either arr or size must be specified")

    def init(self, arr: list[int]) -> None:
        self.size = len(arr)
        self.tree = deepcopy(arr)
        for i in range(1, self.size):
            j = self.next_(i)
            if j < self.size:
                self.tree[j] += self.tree[i]

    def get_array(self) -> list[int]:
        arr = self.tree[:]
        for i in range(self.size - 1, 0, -1):
            j = self.next_(i)
            if j < self.size:
                arr[j] -= arr[i]
        return arr

    @staticmethod
    def next_(index: int) -> int:
        return index + (index & (-index))

    @staticmethod
    def prev(index: int) -> int:
        return index - (index & (-index))

    def add(self, index: int, value: int) -> None:
        if index == 0:
            self.tree[0] += value
            return
        while index < self.size:
            self.tree[index] += value
            index = self.next_(index)

    def update(self, index: int, value: int) -> None:
        self.add(index, value - self.get(index))

    def prefix(self, right: int) -> int:
        """Sum of the elements at indices [0, right)."""
        if right == 0:
            return 0
        result = self.tree[0]
        right -= 1  # make right inclusive
        while right > 0:
            result += self.tree[right]
            right = self.prev(right)
        return result

    def query(self, left: int, right: int) -> int:
        return self.prefix(right) - self.prefix(left)

    def get(self, index: int) -> int:
        return self.query(index, index + 1)

    def rank_query(self, value: int) -> int:
        value -= self.tree[0]
        if value < 0:
            return -1
        j = 1  # Largest power of 2 <= size
        while j * 2 < self.size:
            j *= 2
        i = 0
        while j > 0:
            if i + j < self.size and self.tree[i + j] <= value:
                value -= self.tree[i + j]
                i += j
            j //= 2
        return i


if __name__ == "__main__":
    import doctest

    doctest.testmod()
286
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging


if is_vision_available():
    import PIL


logger = logging.get_logger(__name__)


def squared_euclidean_distance(a, b):
    b = b.T
    aa = np.sum(np.square(a), axis=1)
    ba = np.sum(np.square(b), axis=0)
    ab = np.matmul(a, b)
    d = aa[:, None] - 2 * ab + ba[None, :]
    return d


def color_quantize(x, clusters):
    x = x.reshape(-1, 3)
    d = squared_euclidean_distance(x, clusters)
    return np.argmin(d, axis=1)


class ImageGPTImageProcessor(BaseImageProcessor):
    """simple docstring"""

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_normalize: bool = True,
        do_color_quantize: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        self.clusters = np.array(clusters) if clusters is not None else None
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_normalize = do_normalize
        self.do_color_quantize = do_color_quantize

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size dictionary must contain both height and width keys. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def normalize(
        self,
        image: np.ndarray,
        data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        image = rescale(image=image, scale=1 / 127.5, data_format=data_format)
        image = image - 1
        return image

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_normalize: bool = None,
        do_color_quantize: Optional[bool] = None,
        clusters: Optional[Union[List[List[int]], np.ndarray]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[Union[str, ChannelDimension]] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        do_color_quantize = do_color_quantize if do_color_quantize is not None else self.do_color_quantize
        clusters = clusters if clusters is not None else self.clusters
        clusters = np.array(clusters)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_color_quantize and clusters is None:
            raise ValueError("Clusters must be specified if do_color_quantize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_normalize:
            images = [self.normalize(image=image) for image in images]

        if do_color_quantize:
            images = [to_channel_dimension_format(image, ChannelDimension.LAST) for image in images]
            # color quantize from (batch_size, height, width, 3) to (batch_size, height, width)
            images = np.array(images)
            images = color_quantize(images, clusters).reshape(images.shape[:-1])

            # flatten to (batch_size, height*width)
            batch_size = images.shape[0]
            images = images.reshape(batch_size, -1)

            # We need to convert back to a list of images to keep consistent behaviour across processors.
            images = list(images)
        else:
            images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"input_ids": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
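# --- Illustrative sketch (not part of the original module): how the module-level
# helpers above behave on toy data. The 8-colour palette is made up; real
# checkpoints ship their own learned clusters.
#
#     rng = np.random.default_rng(0)
#     clusters = rng.integers(0, 256, size=(8, 3)).astype(np.float64)   # hypothetical palette
#     image = rng.integers(0, 256, size=(4, 4, 3)).astype(np.float64)   # one tiny RGB image
#     ids = color_quantize(image, clusters)                             # nearest-palette index per pixel
#     assert ids.shape == (16,) and ids.max() < 8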
"""simple docstring""" import collections import inspect import unittest from transformers import SwinvaConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class a : """simple docstring""" def __init__( self: Optional[int] , UpperCamelCase: Tuple , UpperCamelCase: Optional[int]=13 , UpperCamelCase: List[Any]=32 , UpperCamelCase: str=2 , UpperCamelCase: List[str]=3 , UpperCamelCase: Dict=16 , UpperCamelCase: Dict=[1, 2, 1] , UpperCamelCase: List[Any]=[2, 2, 4] , UpperCamelCase: Optional[int]=2 , UpperCamelCase: str=2.0 , UpperCamelCase: Tuple=True , UpperCamelCase: Union[str, Any]=0.0 , UpperCamelCase: List[str]=0.0 , UpperCamelCase: Optional[int]=0.1 , UpperCamelCase: int="gelu" , UpperCamelCase: Any=False , UpperCamelCase: str=True , UpperCamelCase: List[Any]=0.02 , UpperCamelCase: str=1e-5 , UpperCamelCase: Optional[int]=True , UpperCamelCase: Dict=None , UpperCamelCase: Dict=True , UpperCamelCase: Tuple=10 , UpperCamelCase: Any=8 , ): """simple docstring""" A__ = parent A__ = batch_size A__ = image_size A__ = patch_size A__ = num_channels A__ = embed_dim A__ = depths A__ = num_heads A__ = window_size A__ = mlp_ratio A__ = qkv_bias A__ = hidden_dropout_prob A__ = attention_probs_dropout_prob A__ = drop_path_rate A__ = hidden_act A__ = use_absolute_embeddings A__ = patch_norm A__ = layer_norm_eps A__ = initializer_range A__ = is_training A__ = scope A__ = use_labels A__ = type_sequence_label_size A__ = encoder_stride def UpperCamelCase ( self: int ): """simple docstring""" A__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) A__ = None if self.use_labels: A__ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) A__ = self.get_config() return config, pixel_values, labels def UpperCamelCase ( self: Any ): """simple docstring""" return SwinvaConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def UpperCamelCase ( self: Any , UpperCamelCase: Any , UpperCamelCase: Union[str, Any] , UpperCamelCase: Optional[Any] ): """simple docstring""" A__ = SwinvaModel(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() A__ = model(UpperCamelCase ) A__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1)) A__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) ) def UpperCamelCase ( self: str , UpperCamelCase: Optional[Any] , UpperCamelCase: Any , UpperCamelCase: List[Any] ): """simple docstring""" A__ = SwinvaForMaskedImageModeling(config=UpperCamelCase ) model.to(UpperCamelCase ) model.eval() A__ = model(UpperCamelCase ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) ) # test greyscale images A__ = 1 A__ = SwinvaForMaskedImageModeling(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() A__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) A__ = model(UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) ) def UpperCamelCase ( self: int , UpperCamelCase: Any , UpperCamelCase: List[str] , UpperCamelCase: Tuple ): """simple docstring""" A__ = self.type_sequence_label_size A__ = SwinvaForImageClassification(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() A__ = model(UpperCamelCase , labels=UpperCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase ( self: str ): """simple docstring""" A__ = self.prepare_config_and_inputs() A__ , A__ , A__ = config_and_inputs A__ = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class a ( _lowerCamelCase, _lowerCamelCase, unittest.TestCase ): """simple docstring""" UpperCAmelCase = ( (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else () ) UpperCAmelCase = ( {"feature-extraction": SwinvaModel, "image-classification": SwinvaForImageClassification} if is_torch_available() else {} ) UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False UpperCAmelCase = False def UpperCamelCase ( self: Any ): """simple docstring""" A__ = SwinvaModelTester(self ) A__ = ConfigTester(self , config_class=UpperCamelCase , embed_dim=37 ) def UpperCamelCase ( self: List[str] ): """simple docstring""" self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def UpperCamelCase ( self: str ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*UpperCamelCase ) @unittest.skip(reason="""Got `CUDA error: misaligned address` with PyTorch 2.0.0.""" ) def UpperCamelCase ( self: int ): """simple docstring""" pass @unittest.skip(reason="""Swinv2 does not use inputs_embeds""" ) def UpperCamelCase ( self: Optional[int] ): """simple docstring""" pass def UpperCamelCase ( self: Any ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCamelCase ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) A__ = model.get_output_embeddings() self.assertTrue(x is None or isinstance(UpperCamelCase , nn.Linear ) ) def UpperCamelCase ( self: Union[str, Any] ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: A__ = model_class(UpperCamelCase ) A__ = 
inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic A__ = [*signature.parameters.keys()] A__ = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , UpperCamelCase ) def UpperCamelCase ( self: Optional[int] ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = True for model_class in self.all_model_classes: A__ = True A__ = False A__ = True A__ = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) A__ = outputs.attentions A__ = len(self.model_tester.depths ) self.assertEqual(len(UpperCamelCase ) , UpperCamelCase ) # check that output_attentions also work using config del inputs_dict["output_attentions"] A__ = True A__ = config.window_size**2 A__ = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) A__ = outputs.attentions self.assertEqual(len(UpperCamelCase ) , UpperCamelCase ) self.assertListEqual( list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) A__ = len(UpperCamelCase ) # Check attention is always last and order is fine A__ = True A__ = True A__ = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) if hasattr(self.model_tester , """num_hidden_states_types""" ): A__ = self.model_tester.num_hidden_states_types else: # also another +1 for reshaped_hidden_states A__ = 2 self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase ) ) A__ = outputs.attentions self.assertEqual(len(UpperCamelCase ) , UpperCamelCase ) self.assertListEqual( list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , ) def UpperCamelCase ( self: Optional[int] , UpperCamelCase: Tuple , UpperCamelCase: Union[str, Any] , UpperCamelCase: int , UpperCamelCase: str ): """simple docstring""" A__ = model_class(UpperCamelCase ) model.to(UpperCamelCase ) model.eval() with torch.no_grad(): A__ = model(**self._prepare_for_class(UpperCamelCase , UpperCamelCase ) ) A__ = outputs.hidden_states A__ = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 ) self.assertEqual(len(UpperCamelCase ) , UpperCamelCase ) # Swinv2 has a different seq_length A__ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) A__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) A__ = outputs.reshaped_hidden_states self.assertEqual(len(UpperCamelCase ) , UpperCamelCase ) A__ , A__ , A__ , A__ = reshaped_hidden_states[0].shape A__ = ( reshaped_hidden_states[0].view(UpperCamelCase , UpperCamelCase , height * width ).permute(0 , 2 , 1 ) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , ) def UpperCamelCase ( self: str ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) 
for model_class in self.all_model_classes: A__ = True self.check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True self.check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase ) def UpperCamelCase ( self: Optional[Any] ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = 3 A__ = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable ) else (self.model_tester.image_size, self.model_tester.image_size) ) A__ = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable ) else (config.patch_size, config.patch_size) ) A__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) A__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes: A__ = True self.check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase , (padded_height, padded_width) ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] A__ = True self.check_hidden_states_output(UpperCamelCase , UpperCamelCase , UpperCamelCase , (padded_height, padded_width) ) def UpperCamelCase ( self: List[str] ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*UpperCamelCase ) def UpperCamelCase ( self: Optional[int] ): """simple docstring""" A__ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*UpperCamelCase ) @slow def UpperCamelCase ( self: Any ): """simple docstring""" for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: A__ = SwinvaModel.from_pretrained(UpperCamelCase ) self.assertIsNotNone(UpperCamelCase ) def UpperCamelCase ( self: Tuple ): """simple docstring""" A__ , A__ = self.model_tester.prepare_config_and_inputs_for_common() A__ = _config_zero_init(UpperCamelCase ) for model_class in self.all_model_classes: A__ = model_class(config=UpperCamelCase ) for name, param in model.named_parameters(): if "embeddings" not in name and "logit_scale" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class a ( unittest.TestCase ): """simple docstring""" @cached_property def UpperCamelCase ( self: Optional[int] ): """simple docstring""" return ( AutoImageProcessor.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ) if is_vision_available() else None ) @slow def UpperCamelCase ( self: Optional[Any] ): """simple docstring""" A__ = SwinvaForImageClassification.from_pretrained("""microsoft/swinv2-tiny-patch4-window8-256""" ).to( UpperCamelCase ) A__ = self.default_image_processor A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) A__ = image_processor(images=UpperCamelCase , return_tensors="""pt""" ).to(UpperCamelCase ) # forward pass with torch.no_grad(): A__ = model(**UpperCamelCase ) # verify the logits A__ = torch.Size((1, 10_00) ) self.assertEqual(outputs.logits.shape , UpperCamelCase ) A__ = torch.tensor([-0.3_947, -0.4_306, 0.0_026] ).to(UpperCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , UpperCamelCase , atol=1e-4 ) )
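# --- Illustrative sketch (not part of the original tests): the integration test
# above boils down to this public-API usage; the checkpoint id is the one it loads,
# the image path is hypothetical.
#
#     from PIL import Image
#     from transformers import AutoImageProcessor, Swinv2ForImageClassification
#     processor = AutoImageProcessor.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
#     model = Swinv2ForImageClassification.from_pretrained("microsoft/swinv2-tiny-patch4-window8-256")
#     inputs = processor(images=Image.open("cats.png"), return_tensors="pt")
#     logits = model(**inputs).logits  # shape (1, 1000), one score per ImageNet class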
"""simple docstring""" import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import YolosImageProcessor class a ( unittest.TestCase ): """simple docstring""" def __init__( self: Optional[Any] , UpperCamelCase: Any , UpperCamelCase: Optional[int]=7 , UpperCamelCase: str=3 , UpperCamelCase: int=30 , UpperCamelCase: int=4_00 , UpperCamelCase: Union[str, Any]=True , UpperCamelCase: Tuple=None , UpperCamelCase: Any=True , UpperCamelCase: int=[0.5, 0.5, 0.5] , UpperCamelCase: Any=[0.5, 0.5, 0.5] , UpperCamelCase: Optional[Any]=True , UpperCamelCase: List[Any]=1 / 2_55 , UpperCamelCase: Tuple=True , ): """simple docstring""" A__ = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 13_33} A__ = parent A__ = batch_size A__ = num_channels A__ = min_resolution A__ = max_resolution A__ = do_resize A__ = size A__ = do_normalize A__ = image_mean A__ = image_std A__ = do_rescale A__ = rescale_factor A__ = do_pad def UpperCamelCase ( self: Optional[Any] ): """simple docstring""" return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def UpperCamelCase ( self: Any , UpperCamelCase: List[str] , UpperCamelCase: int=False ): """simple docstring""" if not batched: A__ = image_inputs[0] if isinstance(UpperCamelCase , Image.Image ): A__ , A__ = image.size else: A__ , A__ = image.shape[1], image.shape[2] if w < h: A__ = int(self.size["""shortest_edge"""] * h / w ) A__ = self.size["""shortest_edge"""] elif w > h: A__ = self.size["""shortest_edge"""] A__ = int(self.size["""shortest_edge"""] * w / h ) else: A__ = self.size["""shortest_edge"""] A__ = self.size["""shortest_edge"""] else: A__ = [] for image in image_inputs: A__ , A__ = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) A__ = max(UpperCamelCase , key=lambda UpperCamelCase : item[0] )[0] A__ = max(UpperCamelCase , key=lambda UpperCamelCase : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class a ( _lowerCamelCase, unittest.TestCase ): """simple docstring""" UpperCAmelCase = YolosImageProcessor if is_vision_available() else None def UpperCamelCase ( self: Optional[int] ): """simple docstring""" A__ = YolosImageProcessingTester(self ) @property def UpperCamelCase ( self: Optional[int] ): """simple docstring""" return self.image_processor_tester.prepare_image_processor_dict() def UpperCamelCase ( self: Union[str, Any] ): """simple docstring""" A__ = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCamelCase , """image_mean""" ) ) self.assertTrue(hasattr(UpperCamelCase , """image_std""" ) ) self.assertTrue(hasattr(UpperCamelCase , """do_normalize""" ) ) self.assertTrue(hasattr(UpperCamelCase , """do_resize""" ) ) self.assertTrue(hasattr(UpperCamelCase , """size""" ) ) def UpperCamelCase ( self: Tuple ): """simple docstring""" A__ = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 
13_33} ) self.assertEqual(image_processor.do_pad , UpperCamelCase ) A__ = self.image_processing_class.from_dict( self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=UpperCamelCase ) self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} ) self.assertEqual(image_processor.do_pad , UpperCamelCase ) def UpperCamelCase ( self: str ): """simple docstring""" pass def UpperCamelCase ( self: str ): """simple docstring""" A__ = self.image_processing_class(**self.image_processor_dict ) # create random PIL images A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , Image.Image ) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase ) A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase ( self: Tuple ): """simple docstring""" A__ = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , numpify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , np.ndarray ) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase ( self: str ): """simple docstring""" A__ = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , torch.Tensor ) # Test not batched input A__ = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched A__ = image_processing(UpperCamelCase , return_tensors="""pt""" ).pixel_values A__ , A__ = self.image_processor_tester.get_expected_values(UpperCamelCase , batched=UpperCamelCase ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def UpperCamelCase ( self: str ): """simple docstring""" A__ = 
self.image_processing_class(**self.image_processor_dict ) A__ = self.image_processing_class(do_resize=UpperCamelCase , do_normalize=UpperCamelCase , do_rescale=UpperCamelCase ) # create random PyTorch tensors A__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCamelCase , torchify=UpperCamelCase ) for image in image_inputs: self.assertIsInstance(UpperCamelCase , torch.Tensor ) # Test whether the method "pad" and calling the image processor return the same tensors A__ = image_processing_a.pad(UpperCamelCase , return_tensors="""pt""" ) A__ = image_processing_a(UpperCamelCase , return_tensors="""pt""" ) self.assertTrue( torch.allclose(encoded_images_with_method["""pixel_values"""] , encoded_images["""pixel_values"""] , atol=1e-4 ) ) @slow def UpperCamelCase ( self: str ): """simple docstring""" A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f: A__ = json.loads(f.read() ) A__ = {"""image_id""": 3_97_69, """annotations""": target} # encode them A__ = YolosImageProcessor.from_pretrained("""hustvl/yolos-small""" ) A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , return_tensors="""pt""" ) # verify pixel values A__ = torch.Size([1, 3, 8_00, 10_66] ) self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase ) A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) ) # verify area A__ = torch.tensor([5_887.9_600, 11_250.2_061, 489_353.8_438, 837_122.7_500, 147_967.5_156, 165_732.3_438] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) ) # verify boxes A__ = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase ) A__ = torch.tensor([0.5_503, 0.2_765, 0.0_604, 0.2_215] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) ) # verify image_id A__ = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) ) # verify is_crowd A__ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) ) # verify class_labels A__ = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) ) # verify orig_size A__ = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) ) # verify size A__ = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) ) @slow def UpperCamelCase ( self: int ): """simple docstring""" A__ = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f: A__ = json.loads(f.read() ) A__ = {"""file_name""": """000000039769.png""", """image_id""": 3_97_69, """segments_info""": target} A__ = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" ) # encode them A__ = YolosImageProcessor(format="""coco_panoptic""" ) A__ = image_processing(images=UpperCamelCase , annotations=UpperCamelCase , masks_path=UpperCamelCase , return_tensors="""pt""" ) # verify pixel values A__ = torch.Size([1, 3, 8_00, 10_66] ) 
self.assertEqual(encoding["""pixel_values"""].shape , UpperCamelCase ) A__ = torch.tensor([0.2_796, 0.3_138, 0.3_481] ) self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , UpperCamelCase , atol=1e-4 ) ) # verify area A__ = torch.tensor([147_979.6_875, 165_527.0_469, 484_638.5_938, 11_292.9_375, 5_879.6_562, 7_634.1_147] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , UpperCamelCase ) ) # verify boxes A__ = torch.Size([6, 4] ) self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , UpperCamelCase ) A__ = torch.tensor([0.2_625, 0.5_437, 0.4_688, 0.8_625] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , UpperCamelCase , atol=1e-3 ) ) # verify image_id A__ = torch.tensor([3_97_69] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , UpperCamelCase ) ) # verify is_crowd A__ = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , UpperCamelCase ) ) # verify class_labels A__ = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , UpperCamelCase ) ) # verify masks A__ = 82_28_73 self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , UpperCamelCase ) # verify orig_size A__ = torch.tensor([4_80, 6_40] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , UpperCamelCase ) ) # verify size A__ = torch.tensor([8_00, 10_66] ) self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , UpperCamelCase ) )
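# --- Illustrative sketch (not part of the original tests): the core public-API
# call the preprocessing tests above exercise; the checkpoint id is the one they
# load, the image path is hypothetical.
#
#     from PIL import Image
#     from transformers import YolosImageProcessor
#     processor = YolosImageProcessor.from_pretrained("hustvl/yolos-small")
#     encoding = processor(images=Image.open("street.png"), return_tensors="pt")
#     print(encoding["pixel_values"].shape)  # e.g. torch.Size([1, 3, 800, 1066])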
import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class __lowerCamelCase ( unittest.TestCase ): """simple docstring""" def A__ ( self ) -> Dict: '''simple docstring''' lowercase_ = tempfile.mkdtemp() # fmt: off lowercase_ = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""] # fmt: on lowercase_ = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) ) lowercase_ = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""] lowercase_ = {"""unk_token""": """<unk>"""} lowercase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) lowercase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(_lowerCAmelCase ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(_lowerCAmelCase ) ) lowercase_ = { """do_resize""": True, """size""": 20, """do_center_crop""": True, """crop_size""": 18, """do_normalize""": True, """image_mean""": [0.48145466, 0.4578275, 0.40821073], """image_std""": [0.26862954, 0.26130258, 0.27577711], } lowercase_ = os.path.join(self.tmpdirname , _lowerCAmelCase ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(_lowerCAmelCase , _lowerCAmelCase ) def A__ ( self , **UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **_lowerCAmelCase ) def A__ ( self , **UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" 
, **_lowerCAmelCase ) def A__ ( self , **UpperCAmelCase ) -> Optional[Any]: '''simple docstring''' return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_lowerCAmelCase ) def A__ ( self ) -> Any: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def A__ ( self ) -> int: '''simple docstring''' lowercase_ = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowercase_ = [Image.fromarray(np.moveaxis(_lowerCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def A__ ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ = self.get_tokenizer() lowercase_ = self.get_rust_tokenizer() lowercase_ = self.get_image_processor() lowercase_ = OwlViTProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowercase_ = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_lowerCAmelCase ) lowercase_ = OwlViTProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowercase_ = OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , _lowerCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , _lowerCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , _lowerCAmelCase ) self.assertIsInstance(processor_fast.image_processor , _lowerCAmelCase ) def A__ ( self ) -> Dict: '''simple docstring''' lowercase_ = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase_ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) lowercase_ = self.get_image_processor(do_normalize=_lowerCAmelCase ) lowercase_ = OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=_lowerCAmelCase ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowerCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , _lowerCAmelCase ) def A__ ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ = self.get_image_processor() lowercase_ = self.get_tokenizer() lowercase_ = OwlViTProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase ) lowercase_ = self.prepare_image_inputs() lowercase_ = image_processor(_lowerCAmelCase , return_tensors="np" ) lowercase_ = processor(images=_lowerCAmelCase , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def A__ ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ = self.get_image_processor() lowercase_ = self.get_tokenizer() lowercase_ = OwlViTProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase ) lowercase_ = """lower newer""" lowercase_ = processor(text=_lowerCAmelCase , return_tensors="np" ) lowercase_ = tokenizer(_lowerCAmelCase , return_tensors="np" ) 
for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() ) def A__ ( self ) -> List[str]: '''simple docstring''' lowercase_ = self.get_image_processor() lowercase_ = self.get_tokenizer() lowercase_ = OwlViTProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase ) lowercase_ = """lower newer""" lowercase_ = self.prepare_image_inputs() lowercase_ = processor(text=_lowerCAmelCase , images=_lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(_lowerCAmelCase ): processor() def A__ ( self ) -> Union[str, Any]: '''simple docstring''' lowercase_ = """google/owlvit-base-patch32""" lowercase_ = OwlViTProcessor.from_pretrained(_lowerCAmelCase ) lowercase_ = ["""cat""", """nasa badge"""] lowercase_ = processor(text=_lowerCAmelCase ) lowercase_ = 16 self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (2, seq_length) ) # test if it raises when no input is passed with pytest.raises(_lowerCAmelCase ): processor() def A__ ( self ) -> Optional[Any]: '''simple docstring''' lowercase_ = """google/owlvit-base-patch32""" lowercase_ = OwlViTProcessor.from_pretrained(_lowerCAmelCase ) lowercase_ = [["""cat""", """nasa badge"""], ["""person"""]] lowercase_ = processor(text=_lowerCAmelCase ) lowercase_ = 16 lowercase_ = len(_lowerCAmelCase ) lowercase_ = max([len(_lowerCAmelCase ) for texts in input_texts] ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) ) # test if it raises when no input is passed with pytest.raises(_lowerCAmelCase ): processor() def A__ ( self ) -> List[Any]: '''simple docstring''' lowercase_ = """google/owlvit-base-patch32""" lowercase_ = OwlViTProcessor.from_pretrained(_lowerCAmelCase ) lowercase_ = ["""cat""", """nasa badge"""] lowercase_ = processor(text=_lowerCAmelCase ) lowercase_ = 16 lowercase_ = inputs["""input_ids"""] lowercase_ = [ [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (2, seq_length) ) self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] ) self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] ) def A__ ( self ) -> str: '''simple docstring''' lowercase_ = self.get_image_processor() lowercase_ = self.get_tokenizer() lowercase_ = OwlViTProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase ) lowercase_ = self.prepare_image_inputs() lowercase_ = self.prepare_image_inputs() lowercase_ = processor(images=_lowerCAmelCase , query_images=_lowerCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(_lowerCAmelCase ): processor() def A__ ( self ) -> List[Any]: '''simple docstring''' lowercase_ = self.get_image_processor() lowercase_ = self.get_tokenizer() lowercase_ = OwlViTProcessor(tokenizer=_lowerCAmelCase , image_processor=_lowerCAmelCase ) lowercase_ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase_ = processor.batch_decode(_lowerCAmelCase ) lowercase_ = tokenizer.batch_decode(_lowerCAmelCase ) self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase )
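# --- Illustrative sketch (not part of the original tests): the nested-text-list
# behaviour checked above, via the public processor; the checkpoint id is the one
# the tests use.
#
#     from transformers import OwlViTProcessor
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["cat", "nasa badge"]], return_tensors="np")
#     print(inputs["input_ids"].shape)  # (2, 16): one padded row per text query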
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/vocab.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/vocab.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/vocab.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/vocab.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/vocab.json",
    },
    "merges_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/merges.txt",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/merges.txt",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/merges.txt",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/merges.txt",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "gpt2": "https://huggingface.co/gpt2/resolve/main/tokenizer.json",
        "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json",
        "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/tokenizer.json",
        "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json",
        "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt2": 1024,
    "gpt2-medium": 1024,
    "gpt2-large": 1024,
    "gpt2-xl": 1024,
    "distilgpt2": 1024,
}


class GPTaTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = GPTaTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )

        self.add_bos_token = kwargs.pop("add_bos_token", False)

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        assert self.add_prefix_space or not is_split_into_words, (
            f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True "
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]

        return input_ids
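# --- Illustrative sketch (not part of the original module). With an installed
# `transformers`, the equivalent public class behaves like this; "gpt2" is the
# standard public checkpoint id.
#
#     from transformers import GPT2TokenizerFast
#     tok = GPT2TokenizerFast.from_pretrained("gpt2")
#     enc = tok("hello world")
#     print(enc["input_ids"])               # e.g. [31373, 995]
#     print(tok.decode(enc["input_ids"]))   # "hello world"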
'''simple docstring'''

import argparse
import logging
import pickle
import random
import time

import numpy as np

from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer


logging.basicConfig(
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
logger = logging.getLogger(__name__)


def main():
    parser = argparse.ArgumentParser(
        description="Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids)."
    )
    parser.add_argument("--file_path", type=str, default="data/dump.txt", help="The path to the data.")
    parser.add_argument("--tokenizer_type", type=str, default="bert", choices=["bert", "roberta", "gpt2"])
    parser.add_argument("--tokenizer_name", type=str, default="bert-base-uncased", help="The tokenizer to use.")
    parser.add_argument("--dump_file", type=str, default="data/dump", help="The dump file prefix.")
    args = parser.parse_args()

    logger.info(f"Loading Tokenizer ({args.tokenizer_name})")
    if args.tokenizer_type == "bert":
        tokenizer = BertTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `[CLS]`
        sep = tokenizer.special_tokens_map["sep_token"]  # `[SEP]`
    elif args.tokenizer_type == "roberta":
        tokenizer = RobertaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["cls_token"]  # `<s>`
        sep = tokenizer.special_tokens_map["sep_token"]  # `</s>`
    elif args.tokenizer_type == "gpt2":
        tokenizer = GPTaTokenizer.from_pretrained(args.tokenizer_name)
        bos = tokenizer.special_tokens_map["bos_token"]  # `<|endoftext|>`
        sep = tokenizer.special_tokens_map["eos_token"]  # `<|endoftext|>`

    logger.info(f"Loading text from {args.file_path}")
    with open(args.file_path, "r", encoding="utf8") as fp:
        data = fp.readlines()

    logger.info("Start encoding")
    logger.info(f"{len(data)} examples to process.")

    rslt = []
    iter = 0
    interval = 10000
    start = time.time()
    for text in data:
        text = f"{bos} {text.strip()} {sep}"
        token_ids = tokenizer.encode(text, add_special_tokens=False)
        rslt.append(token_ids)

        iter += 1
        if iter % interval == 0:
            end = time.time()
            logger.info(f"{iter} examples processed. - {(end-start):.2f}s/{interval}expl")
            start = time.time()
    logger.info("Finished binarization")
    logger.info(f"{len(data)} examples processed.")

    dp_file = f"{args.dump_file}.{args.tokenizer_name}.pickle"
    vocab_size = tokenizer.vocab_size
    if vocab_size < (1 << 16):
        rslt_ = [np.uint16(d) for d in rslt]
    else:
        rslt_ = [np.int32(d) for d in rslt]
    random.shuffle(rslt_)
    logger.info(f"Dump to {dp_file}")
    with open(dp_file, "wb") as handle:
        pickle.dump(rslt_, handle, protocol=pickle.HIGHEST_PROTOCOL)


if __name__ == "__main__":
    main()
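# --- Illustrative sketch (not part of the original script): how a dump produced
# above could be loaded back. The file name matches the default output pattern;
# the concrete path is hypothetical.
#
#     import pickle
#     with open("data/dump.bert-base-uncased.pickle", "rb") as handle:
#         sequences = pickle.load(handle)  # list of np.uint16 / np.int32 arrays
#     print(len(sequences), sequences[0][:10])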
"""simple docstring""" import argparse import torch from huggingface_hub import hf_hub_download from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM from transformers.utils import logging logging.set_verbosity_info() _lowercase = logging.get_logger(__name__) def _snake_case ( snake_case__ : str , snake_case__ : str ): A = RobertaPreLayerNormConfig.from_pretrained( snake_case__ , architectures=['RobertaPreLayerNormForMaskedLM'] ) # convert state_dict A = torch.load(hf_hub_download(repo_id=snake_case__ , filename='pytorch_model.bin' ) ) A = {} for tensor_key, tensor_value in original_state_dict.items(): # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta' if tensor_key.startswith('roberta.' ): A = 'roberta_prelayernorm.' + tensor_key[len('roberta.' ) :] # The original implementation contains weights which are not used, remove them from the state_dict if tensor_key.endswith('.self.LayerNorm.weight' ) or tensor_key.endswith('.self.LayerNorm.bias' ): continue A = tensor_value A = RobertaPreLayerNormForMaskedLM.from_pretrained( pretrained_model_name_or_path=snake_case__ , config=snake_case__ , state_dict=snake_case__ ) model.save_pretrained(snake_case__ ) # convert tokenizer A = AutoTokenizer.from_pretrained(snake_case__ ) tokenizer.save_pretrained(snake_case__ ) if __name__ == "__main__": _lowercase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint-repo''', default=None, type=str, required=True, help='''Path the official PyTorch dump, e.g. \'andreasmadsen/efficient_mlm_m0.40\'.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) _lowercase = parser.parse_args() convert_roberta_prelayernorm_checkpoint_to_pytorch(args.checkpoint_repo, args.pytorch_dump_folder_path)
'''simple docstring'''

import argparse

import torch

from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
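# --- Illustrative sketch (not part of the original script): the conversion entry
# point can also be called directly from Python; all three paths below are
# hypothetical.
#
#     convert_tf_checkpoint_to_pytorch(
#         "/tmp/lxmert/model.ckpt",        # TF checkpoint prefix
#         "/tmp/lxmert/config.json",       # model config
#         "/tmp/lxmert/pytorch_model.bin", # output file
#     )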
'''simple docstring'''

import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP
class UnCLIPSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class UnCLIPScheduler(SchedulerMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        variance_type: str = "fixed_small_log",
        clip_sample: bool = True,
        clip_sample_range: Optional[float] = 1.0,
        prediction_type: str = "epsilon",
        beta_schedule: str = "squaredcos_cap_v2",
    ):
        if beta_schedule != "squaredcos_cap_v2":
            raise ValueError("UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'")

        self.betas = betas_for_alpha_bar(num_train_timesteps)
        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)
        self.one = torch.tensor(1.0)

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy())

        self.variance_type = variance_type

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None):
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        self.num_inference_steps = num_inference_steps
        step_ratio = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1)
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)

    def _get_variance(self, t, prev_timestep=None, predicted_variance=None, variance_type=None):
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = beta_prod_t_prev / beta_prod_t * beta

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small_log":
            variance = torch.log(torch.clamp(variance, min=1e-20))
            variance = torch.exp(0.5 * variance)
        elif variance_type == "learned_range":
            # NOTE difference with DDPM scheduler
            min_log = variance.log()
            max_log = beta.log()

            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        prev_timestep: Optional[int] = None,
        generator=None,
        return_dict: bool = True,
    ):
        t = timestep

        if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range":
            model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        if prev_timestep is None:
            prev_timestep = t - 1

        alpha_prod_t = self.alphas_cumprod[t]
        alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        if prev_timestep == t - 1:
            beta = self.betas[t]
            alpha = self.alphas[t]
        else:
            beta = 1 - alpha_prod_t / alpha_prod_t_prev
            alpha = 1 - beta

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`"
                " for the UnCLIPScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = torch.clamp(
                pred_original_sample, -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t
        current_sample_coeff = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        variance = 0
        if t > 0:
            variance_noise = randn_tensor(
                model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device
            )
            variance = self._get_variance(
                t,
                predicted_variance=predicted_variance,
                prev_timestep=prev_timestep,
            )

            if self.variance_type == "fixed_small_log":
                variance = variance
            elif self.variance_type == "learned_range":
                variance = (0.5 * variance).exp()
            else:
                raise ValueError(
                    f"variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`"
                    " for the UnCLIPScheduler."
                )

            variance = variance * variance_noise

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample,)
        return UnCLIPSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ):
        # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device, dtype=original_samples.dtype)
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        return noisy_samples
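# --- Illustrative sketch (not part of the original module): a quick sanity check
# of the squared-cosine schedule built by `betas_for_alpha_bar` above.
#
#     betas = betas_for_alpha_bar(10)
#     assert betas.shape == (10,)
#     assert bool((betas > 0).all()) and bool((betas <= 0.999).all())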
72
1
"""simple docstring""" import unittest from transformers import MPNetConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class __snake_case : """simple docstring""" def __init__( self , __lowerCamelCase , __lowerCamelCase=13 , __lowerCamelCase=7 , __lowerCamelCase=True , __lowerCamelCase=True , __lowerCamelCase=False , __lowerCamelCase=True , __lowerCamelCase=99 , __lowerCamelCase=64 , __lowerCamelCase=5 , __lowerCamelCase=4 , __lowerCamelCase=64 , __lowerCamelCase="gelu" , __lowerCamelCase=0.1 , __lowerCamelCase=0.1 , __lowerCamelCase=512 , __lowerCamelCase=16 , __lowerCamelCase=2 , __lowerCamelCase=0.0_2 , __lowerCamelCase=3 , __lowerCamelCase=4 , __lowerCamelCase=None , ): '''simple docstring''' __A : Optional[int] = parent __A : Any = batch_size __A : Union[str, Any] = seq_length __A : List[Any] = is_training __A : Union[str, Any] = use_input_mask __A : Any = use_token_type_ids __A : str = use_labels __A : Tuple = vocab_size __A : List[str] = hidden_size __A : Union[str, Any] = num_hidden_layers __A : List[str] = num_attention_heads __A : Optional[int] = intermediate_size __A : int = hidden_act __A : List[str] = hidden_dropout_prob __A : Optional[Any] = attention_probs_dropout_prob __A : Union[str, Any] = max_position_embeddings __A : int = type_vocab_size __A : str = type_sequence_label_size __A : List[Any] = initializer_range __A : str = num_labels __A : Optional[int] = num_choices __A : Dict = scope def UpperCamelCase__( self ): '''simple docstring''' return MPNetConfig.from_pretrained('''microsoft/mpnet-base''' ) def UpperCamelCase__( self ): '''simple docstring''' __A : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __A : str = None if self.use_input_mask: __A : int = random_attention_mask([self.batch_size, self.seq_length] ) __A : Union[str, Any] = None __A : str = None __A : Optional[int] = None if self.use_labels: __A : str = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __A : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __A : int = ids_tensor([self.batch_size] , self.num_choices ) __A : Optional[int] = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def UpperCamelCase__( self ): '''simple docstring''' return MPNetConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , ) def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): '''simple docstring''' __A : Tuple = MPNetModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __A : Any = model(__lowerCamelCase , __lowerCamelCase ) __A : List[Any] = model(__lowerCamelCase ) 
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): '''simple docstring''' __A : Any = MPNetForQuestionAnswering(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __A : Union[str, Any] = model( __lowerCamelCase , attention_mask=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): '''simple docstring''' __A : Tuple = self.num_labels __A : Tuple = MPNetForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __A : Any = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): '''simple docstring''' __A : List[str] = self.num_choices __A : int = MPNetForMultipleChoice(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __A : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __A : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __A : Union[str, Any] = model( __lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase__( self , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , __lowerCamelCase ): '''simple docstring''' __A : Dict = self.num_labels __A : Union[str, Any] = MPNetForTokenClassification(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() __A : Union[str, Any] = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase__( self ): '''simple docstring''' __A : str = self.prepare_config_and_inputs() ((__A) , (__A) , (__A) , (__A) , (__A) , (__A)) : Dict = config_and_inputs __A : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class __snake_case ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ): """simple docstring""" _lowerCamelCase = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) _lowerCamelCase = ( { """feature-extraction""": MPNetModel, """fill-mask""": MPNetForMaskedLM, """question-answering""": MPNetForQuestionAnswering, """text-classification""": MPNetForSequenceClassification, """token-classification""": MPNetForTokenClassification, """zero-shot""": MPNetForSequenceClassification, } if is_torch_available() else {} ) _lowerCamelCase = False _lowerCamelCase = True def UpperCamelCase__( self ): '''simple docstring''' 
__A : Optional[Any] = MPNetModelTester(self ) __A : int = ConfigTester(self , config_class=__lowerCamelCase , hidden_size=37 ) def UpperCamelCase__( self ): '''simple docstring''' self.config_tester.run_common_tests() def UpperCamelCase__( self ): '''simple docstring''' __A : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*__lowerCamelCase ) def UpperCamelCase__( self ): '''simple docstring''' __A : str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*__lowerCamelCase ) def UpperCamelCase__( self ): '''simple docstring''' __A : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*__lowerCamelCase ) def UpperCamelCase__( self ): '''simple docstring''' __A : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*__lowerCamelCase ) def UpperCamelCase__( self ): '''simple docstring''' __A : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*__lowerCamelCase ) @require_torch class __snake_case ( unittest.TestCase ): """simple docstring""" @slow def UpperCamelCase__( self ): '''simple docstring''' __A : List[Any] = MPNetModel.from_pretrained('''microsoft/mpnet-base''' ) __A : Union[str, Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) __A : str = model(__lowerCamelCase )[0] __A : int = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , __lowerCamelCase ) __A : Tuple = torch.tensor( [[[-0.0_5_5_0, 0.1_9_4_3, -0.0_7_4_0], [-0.0_5_6_2, 0.2_2_1_1, -0.0_5_7_9], [-0.0_4_3_7, 0.3_3_3_7, -0.0_6_4_1]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3] , __lowerCamelCase , atol=1e-4 ) )
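The slow integration test above pins MPNetModel to a fixed hidden-state slice; a minimal sketch of the same inference path outside the test harness (assumes network access to the microsoft/mpnet-base checkpoint):

import torch
from transformers import MPNetModel

model = MPNetModel.from_pretrained("microsoft/mpnet-base")
model.eval()
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
with torch.no_grad():
    last_hidden_state = model(input_ids)[0]
print(last_hidden_state.shape)  # torch.Size([1, 11, 768])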
179
"""simple docstring""" def __lowercase ( snake_case_ : dict ) ->set: '''simple docstring''' __A : List[str] = set() # edges = list of graph's edges __A : Optional[int] = get_edges(snake_case_ ) # While there are still elements in edges list, take an arbitrary edge # (from_node, to_node) and add his extremity to chosen_vertices and then # remove all arcs adjacent to the from_node and to_node while edges: __A , __A : str = edges.pop() chosen_vertices.add(snake_case_ ) chosen_vertices.add(snake_case_ ) for edge in edges.copy(): if from_node in edge or to_node in edge: edges.discard(snake_case_ ) return chosen_vertices def __lowercase ( snake_case_ : dict ) ->set: '''simple docstring''' __A : Tuple = set() for from_node, to_nodes in graph.items(): for to_node in to_nodes: edges.add((from_node, to_node) ) return edges if __name__ == "__main__": import doctest doctest.testmod() # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
179
1
"""Bucket sort for integers: one bucket per value in [min, max], then flatten."""
from __future__ import annotations


def bucket_sort(my_list: list) -> list:
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]
    for i in my_list:
        buckets[int(i - min_value)].append(i)
    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
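A quick property check for bucket_sort above; note the bucket count grows with the value range, so this variant suits inputs whose range is comparable to their length:

import random

data = [random.randint(-50, 50) for _ in range(20)]
assert bucket_sort(data) == sorted(data)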
369
'''simple docstring''' import json import pathlib import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision, slow from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import DetaImageProcessor class lowerCamelCase_ ( unittest.TestCase ): def __init__( self : List[str] , _A : List[Any] , _A : Union[str, Any]=7 , _A : List[str]=3 , _A : str=30 , _A : Tuple=400 , _A : Optional[int]=True , _A : List[str]=None , _A : int=True , _A : int=[0.5, 0.5, 0.5] , _A : Optional[int]=[0.5, 0.5, 0.5] , _A : List[Any]=True , _A : str=1 / 255 , _A : Tuple=True , ): '''simple docstring''' UpperCAmelCase__ : str = size if size is not None else {'''shortest_edge''': 18, '''longest_edge''': 1_333} UpperCAmelCase__ : Optional[Any] = parent UpperCAmelCase__ : Optional[Any] = batch_size UpperCAmelCase__ : List[str] = num_channels UpperCAmelCase__ : List[Any] = min_resolution UpperCAmelCase__ : List[str] = max_resolution UpperCAmelCase__ : Tuple = do_resize UpperCAmelCase__ : Union[str, Any] = size UpperCAmelCase__ : Dict = do_normalize UpperCAmelCase__ : Union[str, Any] = image_mean UpperCAmelCase__ : Optional[int] = image_std UpperCAmelCase__ : Dict = do_rescale UpperCAmelCase__ : Union[str, Any] = rescale_factor UpperCAmelCase__ : int = do_pad def lowercase_ ( self : Any ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_normalize": self.do_normalize, "image_mean": self.image_mean, "image_std": self.image_std, "do_rescale": self.do_rescale, "rescale_factor": self.rescale_factor, "do_pad": self.do_pad, } def lowercase_ ( self : Any , _A : Union[str, Any] , _A : Union[str, Any]=False ): '''simple docstring''' if not batched: UpperCAmelCase__ : Optional[int] = image_inputs[0] if isinstance(_A , Image.Image ): UpperCAmelCase__ , UpperCAmelCase__ : str = image.size else: UpperCAmelCase__ , UpperCAmelCase__ : int = image.shape[1], image.shape[2] if w < h: UpperCAmelCase__ : Optional[Any] = int(self.size['''shortest_edge'''] * h / w ) UpperCAmelCase__ : List[Any] = self.size['''shortest_edge'''] elif w > h: UpperCAmelCase__ : int = self.size['''shortest_edge'''] UpperCAmelCase__ : Dict = int(self.size['''shortest_edge'''] * w / h ) else: UpperCAmelCase__ : List[str] = self.size['''shortest_edge'''] UpperCAmelCase__ : Dict = self.size['''shortest_edge'''] else: UpperCAmelCase__ : int = [] for image in image_inputs: UpperCAmelCase__ , UpperCAmelCase__ : str = self.get_expected_values([image] ) expected_values.append((expected_height, expected_width) ) UpperCAmelCase__ : Union[str, Any] = max(_A , key=lambda _A : item[0] )[0] UpperCAmelCase__ : Union[str, Any] = max(_A , key=lambda _A : item[1] )[1] return expected_height, expected_width @require_torch @require_vision class lowerCamelCase_ ( __a , unittest.TestCase ): lowerCAmelCase__ = DetaImageProcessor if is_vision_available() else None def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = DetaImageProcessingTester(self ) @property def lowercase_ ( self : int ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(_A , '''image_mean''' ) ) 
self.assertTrue(hasattr(_A , '''image_std''' ) ) self.assertTrue(hasattr(_A , '''do_normalize''' ) ) self.assertTrue(hasattr(_A , '''do_resize''' ) ) self.assertTrue(hasattr(_A , '''do_rescale''' ) ) self.assertTrue(hasattr(_A , '''do_pad''' ) ) self.assertTrue(hasattr(_A , '''size''' ) ) def lowercase_ ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 18, '''longest_edge''': 1_333} ) self.assertEqual(image_processor.do_pad , _A ) def lowercase_ ( self : Dict ): '''simple docstring''' pass def lowercase_ ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A ) for image in image_inputs: self.assertIsInstance(_A , Image.Image ) # Test not batched input UpperCAmelCase__ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ , UpperCAmelCase__ : str = self.image_processor_tester.get_expected_values(_A , batched=_A ) UpperCAmelCase__ : Union[str, Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowercase_ ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , numpify=_A ) for image in image_inputs: self.assertIsInstance(_A , np.ndarray ) # Test not batched input UpperCAmelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : List[str] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , ) # Test batched UpperCAmelCase__ : List[str] = image_processing(_A , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : int = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) def lowercase_ ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=_A , torchify=_A ) for image in image_inputs: self.assertIsInstance(_A , torch.Tensor ) # Test not batched input UpperCAmelCase__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.image_processor_tester.get_expected_values(_A ) self.assertEqual( encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, 
expected_width) , ) # Test batched UpperCAmelCase__ : List[Any] = image_processing(_A , return_tensors='''pt''' ).pixel_values UpperCAmelCase__ , UpperCAmelCase__ : Any = self.image_processor_tester.get_expected_values(_A , batched=_A ) self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, expected_height, expected_width, ) , ) @slow def lowercase_ ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_annotations.txt''' , '''r''' ) as f: UpperCAmelCase__ : str = json.loads(f.read() ) UpperCAmelCase__ : Tuple = {'''image_id''': 39_769, '''annotations''': target} # encode them UpperCAmelCase__ : Optional[int] = DetaImageProcessor() UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , return_tensors='''pt''' ) # verify pixel values UpperCAmelCase__ : Optional[int] = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['''pixel_values'''].shape , _A ) UpperCAmelCase__ : Any = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) ) # verify area UpperCAmelCase__ : List[Any] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) ) # verify boxes UpperCAmelCase__ : int = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A ) UpperCAmelCase__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) ) # verify image_id UpperCAmelCase__ : str = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) ) # verify is_crowd UpperCAmelCase__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) ) # verify class_labels UpperCAmelCase__ : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) ) # verify orig_size UpperCAmelCase__ : int = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) ) # verify size UpperCAmelCase__ : int = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) ) @slow def lowercase_ ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) with open('''./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt''' , '''r''' ) as f: UpperCAmelCase__ : int = json.loads(f.read() ) UpperCAmelCase__ : str = {'''file_name''': '''000000039769.png''', '''image_id''': 39_769, '''segments_info''': target} UpperCAmelCase__ : Dict = pathlib.Path('''./tests/fixtures/tests_samples/COCO/coco_panoptic''' ) # encode them UpperCAmelCase__ : Any = DetaImageProcessor(format='''coco_panoptic''' ) UpperCAmelCase__ : str = image_processing(images=_A , annotations=_A , masks_path=_A , return_tensors='''pt''' ) # verify pixel values UpperCAmelCase__ : str = torch.Size([1, 3, 800, 1_066] ) self.assertEqual(encoding['''pixel_values'''].shape , _A ) UpperCAmelCase__ : Union[str, Any] = 
torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] ) self.assertTrue(torch.allclose(encoding['''pixel_values'''][0, 0, 0, :3] , _A , atol=1e-4 ) ) # verify area UpperCAmelCase__ : Any = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''area'''] , _A ) ) # verify boxes UpperCAmelCase__ : Dict = torch.Size([6, 4] ) self.assertEqual(encoding['''labels'''][0]['''boxes'''].shape , _A ) UpperCAmelCase__ : List[str] = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''boxes'''][0] , _A , atol=1e-3 ) ) # verify image_id UpperCAmelCase__ : Optional[int] = torch.tensor([39_769] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''image_id'''] , _A ) ) # verify is_crowd UpperCAmelCase__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''iscrowd'''] , _A ) ) # verify class_labels UpperCAmelCase__ : Tuple = torch.tensor([17, 17, 63, 75, 75, 93] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''class_labels'''] , _A ) ) # verify masks UpperCAmelCase__ : Dict = 822_873 self.assertEqual(encoding['''labels'''][0]['''masks'''].sum().item() , _A ) # verify orig_size UpperCAmelCase__ : str = torch.tensor([480, 640] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''orig_size'''] , _A ) ) # verify size UpperCAmelCase__ : Optional[Any] = torch.tensor([800, 1_066] ) self.assertTrue(torch.allclose(encoding['''labels'''][0]['''size'''] , _A ) )
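The two slow tests above feed DetaImageProcessor COCO-style annotation files from the repository fixtures; a minimal sketch of the same call with an in-memory annotation (the bbox/category_id/area/iscrowd fields are assumed from the COCO detection format, and the image is a synthetic placeholder):

from PIL import Image
from transformers import DetaImageProcessor

image = Image.new("RGB", (640, 480))
target = {
    "image_id": 1,
    "annotations": [{"bbox": [10, 10, 50, 50], "category_id": 17, "area": 2500, "iscrowd": 0}],
}
processor = DetaImageProcessor()
encoding = processor(images=image, annotations=target, return_tensors="pt")
print(encoding["pixel_values"].shape)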
299
0
"""Find the first partition candidate at which the running proportion of
'perfect' partitions falls below a given threshold (Project Euler style)."""
import math


def check_partition_perfect(positive_integer: int) -> bool:
    # a value p is 'perfect' when log2(sqrt(4p + 1) / 2 + 1 / 2) is an integer
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if the candidate is an integer, then there is a partition for this k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
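A quick sanity check on check_partition_perfect above: candidates of the form ((2**(k + 1) - 1)**2 - 1) / 4 make the argument of log2 a power of two, so they pass, e.g.:

assert check_partition_perfect(2)      # k = 1: log2(sqrt(9) / 2 + 1 / 2) == 1
assert check_partition_perfect(12)     # k = 2: log2(sqrt(49) / 2 + 1 / 2) == 2
assert check_partition_perfect(56)     # k = 3: log2(sqrt(225) / 2 + 1 / 2) == 3
assert not check_partition_perfect(6)  # log2(3) is not integral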
58
"""simple docstring""" from typing import List, Optional, Union import numpy as np import PIL import torch from PIL import Image from ...models import UNetaDConditionModel, VQModel from ...pipelines import DiffusionPipeline from ...pipelines.pipeline_utils import ImagePipelineOutput from ...schedulers import DDPMScheduler from ...utils import ( is_accelerate_available, is_accelerate_version, logging, randn_tensor, replace_example_docstring, ) snake_case_ = logging.get_logger(__name__) # pylint: disable=invalid-name snake_case_ = """ Examples: ```py >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline >>> from diffusers.utils import load_image >>> import torch >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16 ... ) >>> pipe_prior.to(\"cuda\") >>> prompt = \"A red cartoon frog, 4k\" >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False) >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained( ... \"kandinsky-community/kandinsky-2-2-decoder\", torch_dtype=torch.float16 ... ) >>> pipe.to(\"cuda\") >>> init_image = load_image( ... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\" ... \"/kandinsky/frog.png\" ... ) >>> image = pipe( ... image=init_image, ... image_embeds=image_emb, ... negative_image_embeds=zero_image_emb, ... height=768, ... width=768, ... num_inference_steps=100, ... strength=0.2, ... ).images >>> image[0].save(\"red_frog.png\") ``` """ def _lowerCAmelCase ( lowercase_ , lowercase_ , lowercase_=8 ): UpperCAmelCase = height // scale_factor**2 if height % scale_factor**2 != 0: new_height += 1 UpperCAmelCase = width // scale_factor**2 if width % scale_factor**2 != 0: new_width += 1 return new_height * scale_factor, new_width * scale_factor def _lowerCAmelCase ( lowercase_ , lowercase_=512 , lowercase_=512 ): UpperCAmelCase = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 ) UpperCAmelCase = np.array(pil_image.convert('RGB' ) ) UpperCAmelCase = arr.astype(np.floataa ) / 1_2_7.5 - 1 UpperCAmelCase = np.transpose(lowercase_ , [2, 0, 1] ) UpperCAmelCase = torch.from_numpy(lowercase_ ).unsqueeze(0 ) return image class A_ ( SCREAMING_SNAKE_CASE_ ): """simple docstring""" def __init__( self :Dict , lowercase_ :UNetaDConditionModel , lowercase_ :DDPMScheduler , lowercase_ :VQModel , ) -> List[str]: super().__init__() self.register_modules( unet=lowercase_ , scheduler=lowercase_ , movq=lowercase_ , ) UpperCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1) def UpperCAmelCase__ ( self :Optional[int] , lowercase_ :Optional[Any] , lowercase_ :Tuple , lowercase_ :Any ) -> Optional[int]: # get the original timestep using init_timestep UpperCAmelCase = min(int(num_inference_steps * strength ) , lowercase_ ) UpperCAmelCase = max(num_inference_steps - init_timestep , 0 ) UpperCAmelCase = self.scheduler.timesteps[t_start:] return timesteps, num_inference_steps - t_start def UpperCAmelCase__ ( self :List[Any] , lowercase_ :Dict , lowercase_ :str , lowercase_ :Optional[Any] , lowercase_ :Union[str, Any] , lowercase_ :List[Any] , lowercase_ :Optional[Any] , lowercase_ :Any=None ) -> Any: if not isinstance(lowercase_ , (torch.Tensor, PIL.Image.Image, list) ): raise ValueError( f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowercase_ )}""" ) UpperCAmelCase = image.to(device=lowercase_ , dtype=lowercase_ ) UpperCAmelCase = batch_size * num_images_per_prompt if image.shape[1] 
== 4: UpperCAmelCase = image else: if isinstance(lowercase_ , lowercase_ ) and len(lowercase_ ) != batch_size: raise ValueError( f"""You have passed a list of generators of length {len(lowercase_ )}, but requested an effective batch""" f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" ) elif isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = [ self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowercase_ ) ] UpperCAmelCase = torch.cat(lowercase_ , dim=0 ) else: UpperCAmelCase = self.movq.encode(lowercase_ ).latent_dist.sample(lowercase_ ) UpperCAmelCase = self.movq.config.scaling_factor * init_latents UpperCAmelCase = torch.cat([init_latents] , dim=0 ) UpperCAmelCase = init_latents.shape UpperCAmelCase = randn_tensor(lowercase_ , generator=lowercase_ , device=lowercase_ , dtype=lowercase_ ) # get latents UpperCAmelCase = self.scheduler.add_noise(lowercase_ , lowercase_ , lowercase_ ) UpperCAmelCase = init_latents return latents def UpperCAmelCase__ ( self :int , lowercase_ :int=0 ) -> List[str]: if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError('Please install accelerate via `pip install accelerate`' ) UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" ) UpperCAmelCase = [ self.unet, self.movq, ] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(lowercase_ , lowercase_ ) def UpperCAmelCase__ ( self :Union[str, Any] , lowercase_ :str=0 ) -> Dict: if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ): from accelerate import cpu_offload_with_hook else: raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' ) UpperCAmelCase = torch.device(f"""cuda:{gpu_id}""" ) if self.device.type != "cpu": self.to('cpu' , silence_dtype_warnings=lowercase_ ) torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist) UpperCAmelCase = None for cpu_offloaded_model in [self.unet, self.movq]: UpperCAmelCase , UpperCAmelCase = cpu_offload_with_hook(lowercase_ , lowercase_ , prev_module_hook=lowercase_ ) # We'll offload the last model manually. 
UpperCAmelCase = hook @property # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device def UpperCAmelCase__ ( self :List[Any] ) -> Dict: if not hasattr(self.unet , '_hf_hook' ): return self.device for module in self.unet.modules(): if ( hasattr(lowercase_ , '_hf_hook' ) and hasattr(module._hf_hook , 'execution_device' ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device @torch.no_grad() @replace_example_docstring(lowercase_ ) def __call__( self :str , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] , lowercase_ :Union[torch.FloatTensor, List[torch.FloatTensor]] , lowercase_ :int = 5_12 , lowercase_ :int = 5_12 , lowercase_ :int = 1_00 , lowercase_ :float = 4.0 , lowercase_ :float = 0.3 , lowercase_ :int = 1 , lowercase_ :Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowercase_ :Optional[str] = "pil" , lowercase_ :bool = True , ) -> List[str]: UpperCAmelCase = self._execution_device UpperCAmelCase = guidance_scale > 1.0 if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = torch.cat(lowercase_ , dim=0 ) UpperCAmelCase = image_embeds.shape[0] if isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = torch.cat(lowercase_ , dim=0 ) if do_classifier_free_guidance: UpperCAmelCase = image_embeds.repeat_interleave(lowercase_ , dim=0 ) UpperCAmelCase = negative_image_embeds.repeat_interleave(lowercase_ , dim=0 ) UpperCAmelCase = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=lowercase_ ) if not isinstance(lowercase_ , lowercase_ ): UpperCAmelCase = [image] if not all(isinstance(lowercase_ , (PIL.Image.Image, torch.Tensor) ) for i in image ): raise ValueError( f"""Input is in incorrect format: {[type(lowercase_ ) for i in image]}. 
Currently, we only support PIL image and pytorch tensor""" ) UpperCAmelCase = torch.cat([prepare_image(lowercase_ , lowercase_ , lowercase_ ) for i in image] , dim=0 ) UpperCAmelCase = image.to(dtype=image_embeds.dtype , device=lowercase_ ) UpperCAmelCase = self.movq.encode(lowercase_ )['latents'] UpperCAmelCase = latents.repeat_interleave(lowercase_ , dim=0 ) self.scheduler.set_timesteps(lowercase_ , device=lowercase_ ) UpperCAmelCase , UpperCAmelCase = self.get_timesteps(lowercase_ , lowercase_ , lowercase_ ) UpperCAmelCase = timesteps[:1].repeat(batch_size * num_images_per_prompt ) UpperCAmelCase , UpperCAmelCase = downscale_height_and_width(lowercase_ , lowercase_ , self.movq_scale_factor ) UpperCAmelCase = self.prepare_latents( lowercase_ , lowercase_ , lowercase_ , lowercase_ , image_embeds.dtype , lowercase_ , lowercase_ ) for i, t in enumerate(self.progress_bar(lowercase_ ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase = {'image_embeds': image_embeds} UpperCAmelCase = self.unet( sample=lowercase_ , timestep=lowercase_ , encoder_hidden_states=lowercase_ , added_cond_kwargs=lowercase_ , return_dict=lowercase_ , )[0] if do_classifier_free_guidance: UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 ) UpperCAmelCase , UpperCAmelCase = noise_pred.chunk(2 ) UpperCAmelCase , UpperCAmelCase = variance_pred.chunk(2 ) UpperCAmelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) UpperCAmelCase = torch.cat([noise_pred, variance_pred_text] , dim=1 ) if not ( hasattr(self.scheduler.config , 'variance_type' ) and self.scheduler.config.variance_type in ["learned", "learned_range"] ): UpperCAmelCase , UpperCAmelCase = noise_pred.split(latents.shape[1] , dim=1 ) # compute the previous noisy sample x_t -> x_t-1 UpperCAmelCase = self.scheduler.step( lowercase_ , lowercase_ , lowercase_ , generator=lowercase_ , )[0] # post-processing UpperCAmelCase = self.movq.decode(lowercase_ , force_not_quantize=lowercase_ )['sample'] if output_type not in ["pt", "np", "pil"]: raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" ) if output_type in ["np", "pil"]: UpperCAmelCase = image * 0.5 + 0.5 UpperCAmelCase = image.clamp(0 , 1 ) UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() if output_type == "pil": UpperCAmelCase = self.numpy_to_pil(lowercase_ ) if not return_dict: return (image,) return ImagePipelineOutput(images=lowercase_ )
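A quick illustration of the resolution helper that the pipeline calls as downscale_height_and_width inside __call__: it maps a requested pixel size to the movq latent grid, rounding up to the next multiple of the scale factor (arguments positional as height, width, scale_factor; expected values under that reading):

assert downscale_height_and_width(768, 768, 8) == (96, 96)  # 768 / 8 exactly
assert downscale_height_and_width(700, 700, 8) == (88, 88)  # 700 / 8 rounded up to a multiple of 8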
78
0
"""Nearest-neighbour search by Euclidean distance, plus cosine similarity."""
from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For each query vector, return [nearest dataset vector, its distance]."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)
    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")
    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            # keep the closer of the current best and the new candidate
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Cosine similarity between two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
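A short usage sketch for similarity_search above; each query row comes back paired with its nearest dataset vector and the Euclidean distance to it:

import numpy as np

dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
queries = np.array([[0.9, 1.1]])
print(similarity_search(dataset, queries))  # [[[1.0, 1.0], 0.14142...]]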
220
'''simple docstring''' import json import os import tempfile import datasets from utils import generate_example_dataset, get_duration A__ : str =5_00_00 A__ : Optional[int] =50_00 A__ , A__ : Optional[int] =os.path.split(__file__) A__ : Tuple =os.path.join(RESULTS_BASEPATH, '''results''', RESULTS_FILENAME.replace('''.py''', '''.json''')) @get_duration def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" for i in range(lowerCAmelCase ): _lowerCAmelCase = dataset[i] @get_duration def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" for i in range(0 , len(lowerCAmelCase ) , lowerCAmelCase ): _lowerCAmelCase = dataset[i : i + batch_size] @get_duration def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" with dataset.formatted_as(type=lowerCAmelCase ): for i in range(lowerCAmelCase ): _lowerCAmelCase = dataset[i] @get_duration def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" with dataset.formatted_as(type=lowerCAmelCase ): for i in range(0 , lowerCAmelCase , lowerCAmelCase ): _lowerCAmelCase = dataset[i : i + batch_size] def UpperCamelCase__ ( ): """simple docstring""" _lowerCAmelCase = {"""num examples""": SPEED_TEST_N_EXAMPLES} _lowerCAmelCase = [ (read, {"""length""": SMALL_TEST}), (read, {"""length""": SPEED_TEST_N_EXAMPLES}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_00}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10_00}), (read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}), (read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}), (read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}), (read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10_00}), ] _lowerCAmelCase = [ (read, {"""length""": SMALL_TEST}), (read, {"""length""": SPEED_TEST_N_EXAMPLES}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_00}), (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10_00}), (read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}), (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10_00}), ] with tempfile.TemporaryDirectory() as tmp_dir: print("""generating dataset""" ) _lowerCAmelCase = datasets.Features( {"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} ) _lowerCAmelCase = generate_example_dataset( os.path.join(lowerCAmelCase , """dataset.arrow""" ) , lowerCAmelCase , num_examples=lowerCAmelCase , seq_shapes={"""list""": (1_00,)} , ) print("""first set of iterations""" ) for func, kwargs in functions: print(func.__name__ , str(lowerCAmelCase ) ) _lowerCAmelCase = func(lowerCAmelCase , **lowerCAmelCase ) print("""shuffling dataset""" ) _lowerCAmelCase = dataset.shuffle() print("""Second set of iterations (after shuffling""" ) for func, kwargs in functions_shuffled: print("""shuffled """ , func.__name__ , 
str(lowerCAmelCase ) ) _lowerCAmelCase = func( lowerCAmelCase , **lowerCAmelCase ) with open(lowerCAmelCase , """wb""" ) as f: f.write(json.dumps(lowerCAmelCase ).encode("""utf-8""" ) ) if __name__ == "__main__": # useful to run the profiler benchmark_iterating()
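The get_duration decorator and generate_example_dataset helper come from a local utils module that is not part of this file; a minimal sketch of a compatible timing decorator (an assumption about its behavior, not the project's actual implementation):

import functools
import time

def get_duration(func):
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        # return the elapsed wall-clock seconds as the benchmark result
        return time.perf_counter() - start
    return wrapper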
220
1
import unittest

import torch
from torch import nn

from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory


def raise_fake_out_of_memory():
    """Stand-in for a real CUDA OOM error."""
    raise RuntimeError("CUDA out of memory.")


class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))


class MemoryTest(unittest.TestCase):
    def test_memory_explicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()

        mock_training_loop_function()
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])

    def test_memory_implicit(self):
        batch_sizes = []

        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1):
            nonlocal batch_sizes
            batch_sizes.append(batch_size)
            if batch_size != 8:
                raise_fake_out_of_memory()
            return batch_size, arg1

        bs, arg1 = mock_training_loop_function("hello")
        self.assertListEqual(batch_sizes, [128, 64, 32, 16, 8])
        self.assertListEqual([bs, arg1], [8, "hello"])

    def test_start_zero(self):
        @find_executable_batch_size(starting_batch_size=0)
        def mock_training_loop_function(batch_size):
            pass

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_approach_zero(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            if batch_size > 0:
                raise_fake_out_of_memory()

        with self.assertRaises(RuntimeError) as cm:
            mock_training_loop_function()
        self.assertIn("No executable batch size found, reached zero.", cm.exception.args[0])

    def test_verbose_guard(self):
        @find_executable_batch_size(starting_batch_size=128)
        def mock_training_loop_function(batch_size, arg1, arg2):
            if batch_size != 8:
                raise_fake_out_of_memory()

        with self.assertRaises(TypeError) as cm:
            mock_training_loop_function(128, "hello", "world")
        self.assertIn("Batch size was passed into `f`", cm.exception.args[0])
        self.assertIn("`f(arg1='hello', arg2='world')", cm.exception.args[0])

    def test_any_other_error(self):
        @find_executable_batch_size(starting_batch_size=16)
        def mock_training_loop_function(batch_size):
            raise ValueError("Oops, we had an error!")

        with self.assertRaises(ValueError) as cm:
            mock_training_loop_function()
        self.assertIn("Oops, we had an error!", cm.exception.args[0])

    @require_cuda
    def test_release_memory(self):
        starting_memory = torch.cuda.memory_allocated()
        model = ModelForTest()
        model.cuda()
        self.assertGreater(torch.cuda.memory_allocated(), starting_memory)
        model = release_memory(model)
        self.assertEqual(torch.cuda.memory_allocated(), starting_memory)
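Outside the test harness, find_executable_batch_size is typically wrapped around a training function so the batch size halves on each CUDA OOM; a minimal usage sketch:

from accelerate.utils.memory import find_executable_batch_size

@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # the decorator catches CUDA OOM errors and retries with batch_size // 2
    print(f"training with batch_size={batch_size}")

train()  # called without the batch_size argument; the decorator supplies it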
285
import argparse import json from dataclasses import dataclass, field from functools import partial from pathlib import Path from typing import List import timm import torch import torch.nn as nn from huggingface_hub import hf_hub_download from torch import Tensor from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification from transformers.utils import logging logging.set_verbosity_info() _UpperCAmelCase : List[Any] = logging.get_logger() @dataclass class lowercase : __SCREAMING_SNAKE_CASE : nn.Module __SCREAMING_SNAKE_CASE : List[nn.Module] = field(default_factory=lowercase_ ) __SCREAMING_SNAKE_CASE : list = field(default_factory=lowercase_ ) def a ( self , snake_case , snake_case , snake_case ): snake_case_ = len(list(m.modules() ) ) == 1 or isinstance(snake_case , nn.Convad ) or isinstance(snake_case , nn.BatchNormad ) if has_not_submodules: self.traced.append(snake_case ) def __call__( self , snake_case ): for m in self.module.modules(): self.handles.append(m.register_forward_hook(self._forward_hook ) ) self.module(snake_case ) [x.remove() for x in self.handles] return self @property def a ( self ): # check the len of the state_dict keys to see if we have learnable params return list(filter(lambda snake_case : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) ) @dataclass class lowercase : __SCREAMING_SNAKE_CASE : nn.Module __SCREAMING_SNAKE_CASE : nn.Module __SCREAMING_SNAKE_CASE : int = 0 __SCREAMING_SNAKE_CASE : List = field(default_factory=lowercase_ ) __SCREAMING_SNAKE_CASE : List = field(default_factory=lowercase_ ) def __call__( self , snake_case ): snake_case_ = Tracker(self.dest )(snake_case ).parametrized snake_case_ = Tracker(self.src )(snake_case ).parametrized snake_case_ = list(filter(lambda snake_case : type(snake_case ) not in self.src_skip , snake_case ) ) snake_case_ = list(filter(lambda snake_case : type(snake_case ) not in self.dest_skip , snake_case ) ) if len(snake_case ) != len(snake_case ): raise Exception( F'''Numbers of operations are different. Source module has {len(snake_case )} operations while''' F''' destination module has {len(snake_case )}.''' ) for dest_m, src_m in zip(snake_case , snake_case ): dest_m.load_state_dict(src_m.state_dict() ) if self.verbose == 1: print(F'''Transfered from={src_m} to={dest_m}''' ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = True ): '''simple docstring''' print(F'''Converting {name}...''' ) with torch.no_grad(): snake_case_ = timm.create_model(UpperCamelCase__ , pretrained=UpperCamelCase__ ).eval() snake_case_ = ResNetForImageClassification(UpperCamelCase__ ).eval() snake_case_ = ModuleTransfer(src=UpperCamelCase__ , dest=UpperCamelCase__ ) snake_case_ = torch.randn((1, 3, 224, 224) ) module_transfer(UpperCamelCase__ ) assert torch.allclose(from_model(UpperCamelCase__ ) , our_model(UpperCamelCase__ ).logits ), "The model logits don't match the original one." 
snake_case_ = F'''resnet{"-".join(name.split("resnet" ) )}''' print(UpperCamelCase__ ) if push_to_hub: our_model.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=UpperCamelCase__ , ) # we can use the convnext one snake_case_ = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' ) image_processor.push_to_hub( repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=UpperCamelCase__ , ) print(F'''Pushed {checkpoint_name}''' ) def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ = None , UpperCamelCase__ = True ): '''simple docstring''' snake_case_ = 'imagenet-1k-id2label.json' snake_case_ = 1000 snake_case_ = (1, num_labels) snake_case_ = 'huggingface/label-files' snake_case_ = num_labels snake_case_ = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) ) snake_case_ = {int(UpperCamelCase__ ): v for k, v in idalabel.items()} snake_case_ = idalabel snake_case_ = {v: k for k, v in idalabel.items()} snake_case_ = partial(UpperCamelCase__ , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ ) snake_case_ = { 'resnet18': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet26': ImageNetPreTrainedConfig( depths=[2, 2, 2, 2] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet34': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[64, 128, 256, 512] , layer_type='basic' ), 'resnet50': ImageNetPreTrainedConfig( depths=[3, 4, 6, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet101': ImageNetPreTrainedConfig( depths=[3, 4, 23, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), 'resnet152': ImageNetPreTrainedConfig( depths=[3, 8, 36, 3] , hidden_sizes=[256, 512, 1024, 2048] , layer_type='bottleneck' ), } if model_name: convert_weight_and_push(UpperCamelCase__ , names_to_config[model_name] , UpperCamelCase__ , UpperCamelCase__ ) else: for model_name, config in names_to_config.items(): convert_weight_and_push(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) return config, expected_shape if __name__ == "__main__": _UpperCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default=None, type=str, help=( """The name of the model you wish to convert, it must be one of the supported resnet* architecture,""" """ currently: resnet18,26,34,50,101,152. If `None`, all of them will the converted.""" ), ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=Path, required=True, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", default=True, type=bool, required=False, help="""If True, push model and image processor to the hub.""", ) _UpperCAmelCase : Optional[Any] = parser.parse_args() _UpperCAmelCase : Path = args.pytorch_dump_folder_path pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True) convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
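Beyond the CLI entry point, the conversion can be driven directly through the function the script invokes as convert_weights_and_push (positional arguments: save_directory, model_name, push_to_hub); a minimal sketch, assuming timm, torch and network access to the checkpoints:

from pathlib import Path

out_dir = Path("converted_resnets")  # hypothetical output directory
out_dir.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(out_dir, "resnet50", False)  # convert one architecture, skip the Hub push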
285
1
import unittest from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin __UpperCAmelCase = get_tests_dir("""fixtures/spiece.model""") @require_sentencepiece @require_tokenizers class SCREAMING_SNAKE_CASE ( a_ , unittest.TestCase ): """simple docstring""" lowerCamelCase : Dict =DebertaVaTokenizer lowerCamelCase : Optional[Any] =DebertaVaTokenizerFast lowerCamelCase : List[str] =True lowerCamelCase : Any =True def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing __lowerCAmelCase : List[Any] = DebertaVaTokenizer(lowerCAmelCase , unk_token="""<unk>""" ) tokenizer.save_pretrained(self.tmpdirname ) def SCREAMING_SNAKE_CASE ( self : Any , lowerCAmelCase : List[str] ) -> Tuple: """simple docstring""" __lowerCAmelCase : Optional[Any] = """this is a test""" __lowerCAmelCase : Tuple = """this is a test""" return input_text, output_text def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]: """simple docstring""" __lowerCAmelCase : Optional[int] = """<pad>""" __lowerCAmelCase : List[Any] = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ) , lowerCAmelCase ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ) , lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]: """simple docstring""" __lowerCAmelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , """<pad>""" ) self.assertEqual(vocab_keys[1] , """<unk>""" ) self.assertEqual(vocab_keys[-1] , """[PAD]""" ) self.assertEqual(len(lowerCAmelCase ) , 3_00_01 ) def SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 3_00_00 ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]: """simple docstring""" __lowerCAmelCase : Optional[Any] = """ \tHeLLo!how \n Are yoU? 
""" __lowerCAmelCase : Tuple = ["""▁hello""", """!""", """how""", """▁are""", """▁you""", """?"""] # fmt: on __lowerCAmelCase : Optional[Any] = DebertaVaTokenizer(lowerCAmelCase , do_lower_case=lowerCAmelCase ) __lowerCAmelCase : List[str] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) __lowerCAmelCase : str = DebertaVaTokenizerFast(lowerCAmelCase , do_lower_case=lowerCAmelCase ) __lowerCAmelCase : Optional[Any] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) @unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" ) def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: """simple docstring""" pass @unittest.skip("""There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.""" ) def SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]: """simple docstring""" pass def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]: """simple docstring""" __lowerCAmelCase : int = """I was born in 92000, and this is falsé.""" __lowerCAmelCase : int = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ] # fmt: on __lowerCAmelCase : Union[str, Any] = DebertaVaTokenizer(lowerCAmelCase , split_by_punct=lowerCAmelCase ) __lowerCAmelCase : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) __lowerCAmelCase : Union[str, Any] = DebertaVaTokenizerFast(lowerCAmelCase , split_by_punct=lowerCAmelCase ) __lowerCAmelCase : Any = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int ) -> int: """simple docstring""" __lowerCAmelCase : Any = """I was born in 92000, and this is falsé.""" __lowerCAmelCase : Optional[int] = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ] # fmt: on __lowerCAmelCase : Union[str, Any] = DebertaVaTokenizer(lowerCAmelCase , do_lower_case=lowerCAmelCase , split_by_punct=lowerCAmelCase ) __lowerCAmelCase : Optional[Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) __lowerCAmelCase : List[Any] = DebertaVaTokenizerFast(lowerCAmelCase , do_lower_case=lowerCAmelCase , split_by_punct=lowerCAmelCase ) __lowerCAmelCase : Tuple = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: """simple docstring""" __lowerCAmelCase : Any = """I was born in 92000, and this is falsé.""" __lowerCAmelCase : Union[str, Any] = ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ] # fmt: on __lowerCAmelCase : Optional[Any] = DebertaVaTokenizer(lowerCAmelCase , do_lower_case=lowerCAmelCase , 
split_by_punct=lowerCAmelCase ) __lowerCAmelCase : str = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) __lowerCAmelCase : Optional[int] = DebertaVaTokenizerFast(lowerCAmelCase , do_lower_case=lowerCAmelCase , split_by_punct=lowerCAmelCase ) __lowerCAmelCase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : int ) -> List[str]: """simple docstring""" __lowerCAmelCase : Tuple = """I was born in 92000, and this is falsé.""" __lowerCAmelCase : int = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """▁""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """▁""", """.""", ] # fmt: on __lowerCAmelCase : Union[str, Any] = DebertaVaTokenizer(lowerCAmelCase , do_lower_case=lowerCAmelCase , split_by_punct=lowerCAmelCase ) __lowerCAmelCase : Any = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) __lowerCAmelCase : int = DebertaVaTokenizerFast(lowerCAmelCase , do_lower_case=lowerCAmelCase , split_by_punct=lowerCAmelCase ) __lowerCAmelCase : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] ) -> int: """simple docstring""" __lowerCAmelCase : Optional[int] = """ \tHeLLo!how \n Are yoU? """ __lowerCAmelCase : Any = ["""▁""", """<unk>""", """e""", """<unk>""", """o""", """!""", """how""", """▁""", """<unk>""", """re""", """▁yo""", """<unk>""", """?"""] # fmt: on __lowerCAmelCase : Tuple = DebertaVaTokenizer(lowerCAmelCase , do_lower_case=lowerCAmelCase , split_by_punct=lowerCAmelCase ) __lowerCAmelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) __lowerCAmelCase : Any = DebertaVaTokenizerFast(lowerCAmelCase , do_lower_case=lowerCAmelCase , split_by_punct=lowerCAmelCase ) __lowerCAmelCase : Optional[int] = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]: """simple docstring""" __lowerCAmelCase : int = self.get_tokenizer() __lowerCAmelCase : Tuple = self.get_rust_tokenizer() __lowerCAmelCase : int = """I was born in 92000, and this is falsé.""" __lowerCAmelCase : int = tokenizer.convert_ids_to_tokens(tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) ) __lowerCAmelCase : int = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) __lowerCAmelCase : Tuple = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) __lowerCAmelCase : int = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) __lowerCAmelCase : List[Any] = self.get_rust_tokenizer() __lowerCAmelCase : Any = tokenizer.encode(lowerCAmelCase ) __lowerCAmelCase : str = rust_tokenizer.encode(lowerCAmelCase ) 
self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : Any ) -> int: """simple docstring""" __lowerCAmelCase : int = """This is a test""" __lowerCAmelCase : List[Any] = [13, 1, 43_98, 25, 21, 12_89] __lowerCAmelCase : Dict = ["""▁""", """T""", """his""", """▁is""", """▁a""", """▁test"""] __lowerCAmelCase : Dict = ["""▁""", """<unk>""", """his""", """▁is""", """▁a""", """▁test"""] __lowerCAmelCase : Dict = DebertaVaTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase ) __lowerCAmelCase : Any = DebertaVaTokenizerFast(lowerCAmelCase , keep_accents=lowerCAmelCase ) __lowerCAmelCase : Union[str, Any] = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) __lowerCAmelCase : List[str] = tokenizer.tokenize(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) __lowerCAmelCase : List[Any] = tokenizer.convert_ids_to_tokens(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) __lowerCAmelCase : Dict = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) __lowerCAmelCase : List[Any] = rust_tokenizer.tokenize(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) __lowerCAmelCase : List[Any] = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) # fmt: off __lowerCAmelCase : List[Any] = """I was born in 92000, and this is falsé.""" __lowerCAmelCase : List[Any] = [13, 1, 23, 3_86, 19, 5_61, 30_50, 15, 17, 48, 25, 82_56, 18, 1, 9] __lowerCAmelCase : Union[str, Any] = ["""▁""", """I""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """.""", ] __lowerCAmelCase : str = ["""▁""", """<unk>""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """.""", ] # fmt: on __lowerCAmelCase : Any = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) __lowerCAmelCase : Optional[Any] = tokenizer.tokenize(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) __lowerCAmelCase : int = tokenizer.convert_ids_to_tokens(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) __lowerCAmelCase : str = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) __lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) __lowerCAmelCase : Dict = rust_tokenizer.convert_ids_to_tokens(lowerCAmelCase ) self.assertListEqual(lowerCAmelCase , lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: """simple docstring""" __lowerCAmelCase : List[Any] = DebertaVaTokenizer(lowerCAmelCase ) __lowerCAmelCase : Tuple = tokenizer.encode("""sequence builders""" ) __lowerCAmelCase : int = tokenizer.encode("""multi-sequence build""" ) __lowerCAmelCase : str = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase ) __lowerCAmelCase : Optional[int] = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase ) self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , lowerCAmelCase ) self.assertEqual( [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + 
[tokenizer.sep_token_id] , lowerCAmelCase , ) @slow def SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]: """simple docstring""" __lowerCAmelCase : Optional[int] = {"""input_ids""": [[1, 3_98_67, 36, 1_93_90, 4_86, 27, 3_50_52, 8_14_36, 18, 6_06_85, 12_25, 7, 3_50_52, 8_14_36, 18, 93_67, 1_68_99, 18, 1_59_37, 53, 5_94, 7_73, 18, 1_62_87, 3_04_65, 36, 1_59_37, 6, 4_11_39, 38, 3_69_79, 6_07_63, 1_91, 6, 3_41_32, 99, 6, 5_05_38, 3_90, 4_32_30, 6, 3_41_32, 27_79, 2_08_50, 14, 6_99, 10_72, 11_94, 36, 3_82, 1_09_01, 53, 7, 6_99, 10_72, 20_84, 36, 2_04_22, 6_30, 53, 19, 1_05, 30_49, 18_96, 10_53, 1_68_99, 15_06, 11, 3_79_78, 42_43, 7, 12_37, 3_18_69, 2_00, 1_65_66, 6_54, 6, 3_50_52, 8_14_36, 7, 5_56_30, 1_35_93, 4, 2], [1, 26, 1_50_11, 13, 6_67, 8, 10_53, 18, 2_36_11, 12_37, 7_23_56, 1_28_20, 34, 10_41_34, 12_09, 35, 1_33_13, 66_27, 21, 2_02, 3_47, 7, 1_64, 23_99, 11, 46, 44_85, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 12_32, 28_64, 1_57_85, 1_49_51, 1_05, 5, 85_81, 12_50, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=lowerCAmelCase , model_name="""microsoft/deberta-v2-xlarge""" , revision="""ad6e42c1532ddf3a15c39246b63f5559d558b670""" , )
139
import warnings

from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor


logger = logging.get_logger(__name__)


class GLPNFeatureExtractor(GLPNImageProcessor):
    """simple docstring"""

    def __init__(self, *args, **kwargs) -> None:
        """simple docstring"""
        warnings.warn(
            "The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use GLPNImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
139
1
'''simple docstring''' import os from tempfile import TemporaryDirectory from unittest import TestCase import pytest from absl.testing import parameterized from datasets import config from datasets.arrow_reader import HF_GCP_BASE_URL from datasets.builder import DatasetBuilder from datasets.dataset_dict import IterableDatasetDict from datasets.iterable_dataset import IterableDataset from datasets.load import dataset_module_factory, import_main_class from datasets.utils.file_utils import cached_path __SCREAMING_SNAKE_CASE : Union[str, Any] = [ {"""dataset""": """wikipedia""", """config_name""": """20220301.de"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.en"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.fr"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.frr"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.it"""}, {"""dataset""": """wikipedia""", """config_name""": """20220301.simple"""}, {"""dataset""": """snli""", """config_name""": """plain_text"""}, {"""dataset""": """eli5""", """config_name""": """LFQA_reddit"""}, {"""dataset""": """wiki40b""", """config_name""": """en"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.compressed"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.nq.no_index"""}, {"""dataset""": """wiki_dpr""", """config_name""": """psgs_w100.multiset.no_index"""}, {"""dataset""": """natural_questions""", """config_name""": """default"""}, ] def UpperCamelCase_ ( _UpperCAmelCase : Optional[int]=True ) -> Tuple: """simple docstring""" if with_config: return [ { "testcase_name": d["dataset"] + "/" + d["config_name"], "dataset": d["dataset"], "config_name": d["config_name"], } for d in DATASETS_ON_HF_GCP ] else: return [ {"testcase_name": dataset, "dataset": dataset} for dataset in {d["dataset"] for d in DATASETS_ON_HF_GCP} ] @parameterized.named_parameters(list_datasets_on_hf_gcp_parameters(with_config=snake_case__ ) ) class lowerCamelCase_ (snake_case__ ): '''simple docstring''' __UpperCamelCase: Optional[Any] = None __UpperCamelCase: int = None def _A ( self : str , A : str , A : List[Any] ): with TemporaryDirectory() as tmp_dir: _UpperCAmelCase : int = dataset_module_factory(A , cache_dir=A ) _UpperCAmelCase : List[Any] = import_main_class(dataset_module.module_path , dataset=A ) _UpperCAmelCase : DatasetBuilder = builder_cls( cache_dir=A , config_name=A , hash=dataset_module.hash , ) _UpperCAmelCase : Tuple = "/".join( [ HF_GCP_BASE_URL, builder_instance._relative_data_dir(with_hash=A ).replace(os.sep , "/" ), config.DATASET_INFO_FILENAME, ] ) _UpperCAmelCase : Optional[Any] = cached_path(A , cache_dir=A ) self.assertTrue(os.path.exists(A ) ) @pytest.mark.integration def UpperCamelCase_ ( _UpperCAmelCase : List[Any] ) -> Tuple: """simple docstring""" _UpperCAmelCase : int = tmp_path_factory.mktemp("test_hf_gcp" ) / "test_wikipedia_simple" _UpperCAmelCase : Union[str, Any] = dataset_module_factory("wikipedia" , cache_dir=_UpperCAmelCase ) _UpperCAmelCase : str = import_main_class(dataset_module.module_path ) _UpperCAmelCase : DatasetBuilder = builder_cls( cache_dir=_UpperCAmelCase , config_name="20220301.frr" , hash=dataset_module.hash , ) # use the HF cloud storage, not the original download_and_prepare that uses apache-beam _UpperCAmelCase : Dict = None builder_instance.download_and_prepare() _UpperCAmelCase : List[str] = builder_instance.as_dataset() assert ds @pytest.mark.integration def UpperCamelCase_ ( _UpperCAmelCase : Optional[Any] ) -> 
Dict: """simple docstring""" _UpperCAmelCase : List[str] = dataset_module_factory("wikipedia" , cache_dir=_UpperCAmelCase ) _UpperCAmelCase : List[Any] = import_main_class(dataset_module.module_path , dataset=_UpperCAmelCase ) _UpperCAmelCase : DatasetBuilder = builder_cls( cache_dir=_UpperCAmelCase , config_name="20220301.frr" , hash=dataset_module.hash , ) _UpperCAmelCase : Dict = builder_instance.as_streaming_dataset() assert ds assert isinstance(_UpperCAmelCase , _UpperCAmelCase ) assert "train" in ds assert isinstance(ds["train"] , _UpperCAmelCase ) assert next(iter(ds["train"] ) )
31
'''simple docstring''' import multiprocessing from typing import TYPE_CHECKING, Optional, Union from .. import Dataset, Features, config from ..formatting import query_table from ..packaged_modules.sql.sql import Sql from ..utils import logging from .abc import AbstractDatasetInputStream if TYPE_CHECKING: import sqlitea import sqlalchemy class __magic_name__ ( _UpperCamelCase ): def __init__( self : Optional[int] ,_UpperCAmelCase : Union[str, "sqlalchemy.sql.Selectable"] ,_UpperCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,_UpperCAmelCase : Optional[Features] = None ,_UpperCAmelCase : str = None ,_UpperCAmelCase : bool = False ,**_UpperCAmelCase : Dict ,): super().__init__(features=_UpperCAmelCase ,cache_dir=_UpperCAmelCase ,keep_in_memory=_UpperCAmelCase ,**_UpperCAmelCase ) _a : Tuple = Sql( cache_dir=_UpperCAmelCase ,features=_UpperCAmelCase ,sql=_UpperCAmelCase ,con=_UpperCAmelCase ,**_UpperCAmelCase ,) def __lowercase ( self : Dict ): _a : Optional[Any] = None _a : Dict = None _a : Dict = None _a : Optional[int] = None self.builder.download_and_prepare( download_config=_UpperCAmelCase ,download_mode=_UpperCAmelCase ,verification_mode=_UpperCAmelCase ,base_path=_UpperCAmelCase ,) # Build dataset for splits _a : List[str] = self.builder.as_dataset( split='train' ,verification_mode=_UpperCAmelCase ,in_memory=self.keep_in_memory ) return dataset class __magic_name__ : def __init__( self : Optional[int] ,_UpperCAmelCase : Dataset ,_UpperCAmelCase : str ,_UpperCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,_UpperCAmelCase : Optional[int] = None ,_UpperCAmelCase : Optional[int] = None ,**_UpperCAmelCase : Dict ,): if num_proc is not None and num_proc <= 0: raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" ) _a : Dict = dataset _a : List[Any] = name _a : Tuple = con _a : Union[str, Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE _a : List[Any] = num_proc _a : Tuple = to_sql_kwargs def __lowercase ( self : List[Any] ): _a : Tuple = self.to_sql_kwargs.pop('sql' ,_UpperCAmelCase ) _a : str = self.to_sql_kwargs.pop('con' ,_UpperCAmelCase ) _a : Optional[Any] = self.to_sql_kwargs.pop('index' ,_UpperCAmelCase ) _a : Any = self._write(index=_UpperCAmelCase ,**self.to_sql_kwargs ) return written def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Dict ): _a , _a , _a : Any = args _a : Tuple = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs _a : Dict = query_table( table=self.dataset.data ,key=slice(_UpperCAmelCase ,offset + self.batch_size ) ,indices=self.dataset._indices ,) _a : Tuple = batch.to_pandas() _a : Dict = df.to_sql(self.name ,self.con ,index=_UpperCAmelCase ,**_UpperCAmelCase ) return num_rows or len(_UpperCAmelCase ) def __lowercase ( self : int ,_UpperCAmelCase : Optional[int] ,**_UpperCAmelCase : List[Any] ): _a : Union[str, Any] = 0 if self.num_proc is None or self.num_proc == 1: for offset in logging.tqdm( range(0 ,len(self.dataset ) ,self.batch_size ) ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,): written += self._batch_sql((offset, index, to_sql_kwargs) ) else: _a , _a : List[Any] = len(self.dataset ), self.batch_size with multiprocessing.Pool(self.num_proc ) as pool: for num_rows in logging.tqdm( pool.imap( self._batch_sql ,[(offset, index, to_sql_kwargs) for offset in range(0 ,_UpperCAmelCase ,_UpperCAmelCase )] ,) ,total=(num_rows // 
batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,): written += num_rows return written
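# A hedged usage sketch of the reader/writer pair above through the public
# `datasets` API (upstream, Dataset.from_sql / Dataset.to_sql wrap these
# SqlDatasetReader / SqlDatasetWriter classes):
from datasets import Dataset

ds = Dataset.from_dict({"id": [1, 2], "text": ["a", "b"]})
ds.to_sql("demo_table", "sqlite:///demo.db")                      # writer path
round_trip = Dataset.from_sql("demo_table", "sqlite:///demo.db")  # reader path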
89
0
def hubble_parameter(
    hubble_constant: float,
    radiation_density: float,
    matter_density: float,
    dark_energy: float,
    redshift: float,
) -> float:
    """simple docstring"""
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError('''All input parameters must be positive''')
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError('''Relative densities cannot be greater than one''')
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_a = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_a ** (1 / 2)
        return hubble


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()

    # demo LCDM approximation
    matter_density = 0.3
    print(
        hubble_parameter(
            hubble_constant=68.3,
            radiation_density=1e-4,
            matter_density=matter_density,
            dark_energy=1 - matter_density,
            redshift=0,
        )
    )
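# A quick sanity check of hubble_parameter above (a sketch, not from the
# source): in a flat universe the density terms sum to 1 at z = 0, so the
# function should return H0 itself.
h0 = hubble_parameter(
    hubble_constant=68.3,
    radiation_density=1e-4,
    matter_density=0.3,
    dark_energy=1 - 0.3 - 1e-4,  # zero-curvature choice
    redshift=0,
)
assert abs(h0 - 68.3) < 1e-9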
362
def _a(UpperCAmelCase) -> int:
    """simple docstring"""
    if not isinstance(UpperCAmelCase, int):
        raise TypeError('''only integers accepted as input''')
    else:
        num_string = str(abs(UpperCAmelCase))
        num_transpositions = [list(num_string) for char in range(len(num_string))]
        for index in range(len(num_string)):
            num_transpositions[index].pop(index)
        return max(
            int("".join(transposition)) for transposition in num_transpositions
        )


if __name__ == "__main__":
    __import__('doctest').testmod()
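# A minimal check of the function above (the name `_a` is kept from the
# source): deleting one digit from 1234 leaves 234, 134, 124 or 123, and the
# function returns the maximum of those.
assert _a(1234) == 234
assert _a(-9087) == 987  # abs() drops the sign before digits are removed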
265
0
'''simple docstring''' import unittest from transformers.testing_utils import require_bsa from transformers.utils import is_bsa_available from ...test_feature_extraction_common import FeatureExtractionSavingTestMixin if is_bsa_available(): from transformers import MarkupLMFeatureExtractor class _UpperCamelCase ( unittest.TestCase ): '''simple docstring''' def __init__( self : Any , _lowerCAmelCase : int): '''simple docstring''' __lowercase =parent def __lowerCamelCase ( self : int): '''simple docstring''' return {} def _A ( ): """simple docstring""" __lowercase ='<HTML>\n\n <HEAD>\n <TITLE>sample document</TITLE>\n </HEAD>\n\n <BODY BGCOLOR="FFFFFF">\n <HR>\n <a href="http://google.com">Goog</a>\n <H1>This is one header</H1>\n <H2>This is a another Header</H2>\n <P>Travel from\n <P>\n <B>SFO to JFK</B>\n <BR>\n <B><I>on May 2, 2015 at 2:00 pm. For details go to confirm.com </I></B>\n <HR>\n <div style="color:#0000FF">\n <h3>Traveler <b> name </b> is\n <p> John Doe </p>\n </div>' __lowercase ='\n <!DOCTYPE html>\n <html>\n <body>\n\n <h1>My First Heading</h1>\n <p>My first paragraph.</p>\n\n </body>\n </html>\n ' return [html_string_a, html_string_a] @require_bsa class _UpperCamelCase ( A , unittest.TestCase ): '''simple docstring''' lowerCAmelCase__ = MarkupLMFeatureExtractor if is_bsa_available() else None def __lowerCamelCase ( self : Tuple): '''simple docstring''' __lowercase =MarkupLMFeatureExtractionTester(self) @property def __lowerCamelCase ( self : Optional[int]): '''simple docstring''' return self.feature_extract_tester.prepare_feat_extract_dict() def __lowerCamelCase ( self : List[Any]): '''simple docstring''' __lowercase =self.feature_extraction_class() # Test not batched input __lowercase =get_html_strings()[0] __lowercase =feature_extractor(_lowerCAmelCase) # fmt: off __lowercase =[['sample document', 'Goog', 'This is one header', 'This is a another Header', 'Travel from', 'SFO to JFK', 'on May 2, 2015 at 2:00 pm. For details go to confirm.com', 'Traveler', 'name', 'is', 'John Doe']] __lowercase =[['/html/head/title', '/html/body/a', '/html/body/h1', '/html/body/h2', '/html/body/p', '/html/body/p/p/b[1]', '/html/body/p/p/b[2]/i', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/b', '/html/body/p/p/div/h3', '/html/body/p/p/div/h3/p']] # fmt: on self.assertEqual(encoding.nodes , _lowerCAmelCase) self.assertEqual(encoding.xpaths , _lowerCAmelCase) # Test batched __lowercase =get_html_strings() __lowercase =feature_extractor(_lowerCAmelCase) # fmt: off __lowercase =expected_nodes + [['My First Heading', 'My first paragraph.']] __lowercase =expected_xpaths + [['/html/body/h1', '/html/body/p']] self.assertEqual(len(encoding.nodes) , 2) self.assertEqual(len(encoding.xpaths) , 2) self.assertEqual(encoding.nodes , _lowerCAmelCase) self.assertEqual(encoding.xpaths , _lowerCAmelCase)
166
'''simple docstring''' from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxSeqaSeqConfigWithPast from ...utils import logging lowerCamelCase = logging.get_logger(__name__) lowerCamelCase = { """google/umt5-small""": """https://huggingface.co/google/umt5-small/resolve/main/config.json""", # See all umt5 models at https://huggingface.co/models?filter=umt5 } class _UpperCamelCase ( A ): '''simple docstring''' lowerCAmelCase__ = """umt5""" lowerCAmelCase__ = ["""past_key_values"""] def __init__( self : Optional[int] , _lowerCAmelCase : int=2_5_0_1_1_2 , _lowerCAmelCase : Union[str, Any]=5_1_2 , _lowerCAmelCase : List[Any]=6_4 , _lowerCAmelCase : Optional[Any]=1_0_2_4 , _lowerCAmelCase : Union[str, Any]=8 , _lowerCAmelCase : Any=None , _lowerCAmelCase : Tuple=6 , _lowerCAmelCase : str=3_2 , _lowerCAmelCase : List[str]=1_2_8 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : Tuple=1e-6 , _lowerCAmelCase : List[Any]=1.0 , _lowerCAmelCase : Union[str, Any]="gated-gelu" , _lowerCAmelCase : List[str]=True , _lowerCAmelCase : int=True , _lowerCAmelCase : Tuple="T5Tokenizer" , _lowerCAmelCase : List[Any]=True , _lowerCAmelCase : List[str]=0 , _lowerCAmelCase : Union[str, Any]=1 , _lowerCAmelCase : Any=0 , **_lowerCAmelCase : int , ): '''simple docstring''' super().__init__( is_encoder_decoder=_lowerCAmelCase , tokenizer_class=_lowerCAmelCase , tie_word_embeddings=_lowerCAmelCase , pad_token_id=_lowerCAmelCase , eos_token_id=_lowerCAmelCase , decoder_start_token_id=_lowerCAmelCase , **_lowerCAmelCase , ) __lowercase =vocab_size __lowercase =d_model __lowercase =d_kv __lowercase =d_ff __lowercase =num_layers __lowercase =( num_decoder_layers if num_decoder_layers is not None else self.num_layers ) # default = symmetry __lowercase =num_heads __lowercase =relative_attention_num_buckets __lowercase =relative_attention_max_distance __lowercase =dropout_rate __lowercase =layer_norm_epsilon __lowercase =initializer_factor __lowercase =feed_forward_proj __lowercase =use_cache __lowercase =self.feed_forward_proj.split('-') __lowercase =act_info[-1] __lowercase =act_info[0] == 'gated' if len(_lowerCAmelCase) > 1 and act_info[0] != "gated" or len(_lowerCAmelCase) > 2: raise ValueError( f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.""" 'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. 
' '\'gated-gelu\' or \'relu\'') if feed_forward_proj == "gated-gelu": __lowercase ='gelu_new' @property def __lowerCamelCase ( self : List[Any]): '''simple docstring''' return self.d_model @property def __lowerCamelCase ( self : Optional[Any]): '''simple docstring''' return self.num_heads @property def __lowerCamelCase ( self : int): '''simple docstring''' return self.num_layers class _UpperCamelCase ( A ): '''simple docstring''' @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs def __lowerCamelCase ( self : Optional[int]): '''simple docstring''' __lowercase ={ 'input_ids': {0: 'batch', 1: 'encoder_sequence'}, 'attention_mask': {0: 'batch', 1: 'encoder_sequence'}, } if self.use_past: __lowercase ='past_encoder_sequence + sequence' __lowercase ={0: 'batch'} __lowercase ={0: 'batch', 1: 'past_decoder_sequence + sequence'} else: __lowercase ={0: 'batch', 1: 'decoder_sequence'} __lowercase ={0: 'batch', 1: 'decoder_sequence'} if self.use_past: self.fill_with_past_key_values_(_lowerCAmelCase , direction='inputs') return common_inputs @property # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset def __lowerCamelCase ( self : Optional[Any]): '''simple docstring''' return 1_3 @property def __lowerCamelCase ( self : int): '''simple docstring''' return 5e-4
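# Standalone sketch of the `feed_forward_proj` parsing above (re-implemented
# here for illustration; `parse_ffn_act` is not a name from the source):
def parse_ffn_act(feed_forward_proj: str):
    act_info = feed_forward_proj.split("-")
    dense_act_fn = act_info[-1]
    is_gated_act = act_info[0] == "gated"
    if feed_forward_proj == "gated-gelu":
        dense_act_fn = "gelu_new"  # special-cased, as in the config class
    return dense_act_fn, is_gated_act

print(parse_ffn_act("gated-gelu"))  # ('gelu_new', True)
print(parse_ffn_act("relu"))        # ('relu', False)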
166
1
'''simple docstring''' import argparse import json from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import ( BertTokenizer, ViltConfig, ViltForImageAndTextRetrieval, ViltForImagesAndTextClassification, ViltForMaskedLM, ViltForQuestionAnswering, ViltImageProcessor, ViltProcessor, ) from transformers.utils import logging logging.set_verbosity_info() __lowerCAmelCase = logging.get_logger(__name__) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False , lowerCAmelCase_=False ) -> Optional[int]: _a : Dict = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((f"""transformer.blocks.{i}.norm1.weight""", f"""vilt.encoder.layer.{i}.layernorm_before.weight""") ) rename_keys.append((f"""transformer.blocks.{i}.norm1.bias""", f"""vilt.encoder.layer.{i}.layernorm_before.bias""") ) rename_keys.append( (f"""transformer.blocks.{i}.attn.proj.weight""", f"""vilt.encoder.layer.{i}.attention.output.dense.weight""") ) rename_keys.append( (f"""transformer.blocks.{i}.attn.proj.bias""", f"""vilt.encoder.layer.{i}.attention.output.dense.bias""") ) rename_keys.append((f"""transformer.blocks.{i}.norm2.weight""", f"""vilt.encoder.layer.{i}.layernorm_after.weight""") ) rename_keys.append((f"""transformer.blocks.{i}.norm2.bias""", f"""vilt.encoder.layer.{i}.layernorm_after.bias""") ) rename_keys.append( (f"""transformer.blocks.{i}.mlp.fc1.weight""", f"""vilt.encoder.layer.{i}.intermediate.dense.weight""") ) rename_keys.append((f"""transformer.blocks.{i}.mlp.fc1.bias""", f"""vilt.encoder.layer.{i}.intermediate.dense.bias""") ) rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.weight""", f"""vilt.encoder.layer.{i}.output.dense.weight""") ) rename_keys.append((f"""transformer.blocks.{i}.mlp.fc2.bias""", f"""vilt.encoder.layer.{i}.output.dense.bias""") ) # embeddings rename_keys.extend( [ # text embeddings ('text_embeddings.word_embeddings.weight', 'vilt.embeddings.text_embeddings.word_embeddings.weight'), ( 'text_embeddings.position_embeddings.weight', 'vilt.embeddings.text_embeddings.position_embeddings.weight', ), ('text_embeddings.position_ids', 'vilt.embeddings.text_embeddings.position_ids'), ( 'text_embeddings.token_type_embeddings.weight', 'vilt.embeddings.text_embeddings.token_type_embeddings.weight', ), ('text_embeddings.LayerNorm.weight', 'vilt.embeddings.text_embeddings.LayerNorm.weight'), ('text_embeddings.LayerNorm.bias', 'vilt.embeddings.text_embeddings.LayerNorm.bias'), # patch embeddings ('transformer.cls_token', 'vilt.embeddings.cls_token'), ('transformer.patch_embed.proj.weight', 'vilt.embeddings.patch_embeddings.projection.weight'), ('transformer.patch_embed.proj.bias', 'vilt.embeddings.patch_embeddings.projection.bias'), ('transformer.pos_embed', 'vilt.embeddings.position_embeddings'), # token type embeddings ('token_type_embeddings.weight', 'vilt.embeddings.token_type_embeddings.weight'), ] ) # final layernorm + pooler rename_keys.extend( [ ('transformer.norm.weight', 'vilt.layernorm.weight'), ('transformer.norm.bias', 'vilt.layernorm.bias'), ('pooler.dense.weight', 'vilt.pooler.dense.weight'), ('pooler.dense.bias', 'vilt.pooler.dense.bias'), ] ) # classifier head(s) if vqa_model: # classification head rename_keys.extend( [ ('vqa_classifier.0.weight', 'classifier.0.weight'), ('vqa_classifier.0.bias', 'classifier.0.bias'), ('vqa_classifier.1.weight', 'classifier.1.weight'), 
('vqa_classifier.1.bias', 'classifier.1.bias'), ('vqa_classifier.3.weight', 'classifier.3.weight'), ('vqa_classifier.3.bias', 'classifier.3.bias'), ] ) elif nlvr_model: # classification head rename_keys.extend( [ ('nlvr2_classifier.0.weight', 'classifier.0.weight'), ('nlvr2_classifier.0.bias', 'classifier.0.bias'), ('nlvr2_classifier.1.weight', 'classifier.1.weight'), ('nlvr2_classifier.1.bias', 'classifier.1.bias'), ('nlvr2_classifier.3.weight', 'classifier.3.weight'), ('nlvr2_classifier.3.bias', 'classifier.3.bias'), ] ) else: pass return rename_keys def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple: for i in range(config.num_hidden_layers ): _a : List[str] = 'vilt.' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) _a : List[Any] = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.weight""" ) _a : List[str] = state_dict.pop(f"""transformer.blocks.{i}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict _a : Tuple = in_proj_weight[ : config.hidden_size, : ] _a : Tuple = in_proj_bias[: config.hidden_size] _a : List[Any] = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] _a : str = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] _a : Optional[int] = in_proj_weight[ -config.hidden_size :, : ] _a : List[Any] = in_proj_bias[-config.hidden_size :] def __lowerCamelCase ( lowerCAmelCase_ ) -> List[str]: _a : int = ['head.weight', 'head.bias'] for k in ignore_keys: state_dict.pop(lowerCAmelCase_ , lowerCAmelCase_ ) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> Optional[int]: _a : Tuple = dct.pop(lowerCAmelCase_ ) _a : int = val @torch.no_grad() def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple: _a : int = ViltConfig(image_size=384 , patch_size=32 , tie_word_embeddings=lowerCAmelCase_ ) _a : Tuple = False _a : Any = False _a : Optional[Any] = False _a : int = False if "vqa" in checkpoint_url: _a : List[Any] = True _a : Any = 3129 _a : List[str] = 'huggingface/label-files' _a : str = 'vqa2-id2label.json' _a : int = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type='dataset' ) , 'r' ) ) _a : Dict = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()} _a : Tuple = idalabel _a : Tuple = {v: k for k, v in idalabel.items()} _a : Tuple = ViltForQuestionAnswering(lowerCAmelCase_ ) elif "nlvr" in checkpoint_url: _a : Any = True _a : int = 2 _a : Optional[Any] = {0: 'False', 1: 'True'} _a : Dict = {v: k for k, v in config.idalabel.items()} _a : Union[str, Any] = 3 _a : List[Any] = ViltForImagesAndTextClassification(lowerCAmelCase_ ) elif "irtr" in checkpoint_url: _a : Tuple = True _a : Any = ViltForImageAndTextRetrieval(lowerCAmelCase_ ) elif "mlm_itm" in checkpoint_url: _a : Optional[int] = True _a : Tuple = ViltForMaskedLM(lowerCAmelCase_ ) else: raise ValueError('Unknown model type' ) # load state_dict of original model, remove and rename some keys _a : Dict = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location='cpu' )['state_dict'] _a : List[Any] = create_rename_keys(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) for src, dest in rename_keys: rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ ) if mlm_model or irtr_model: _a : List[Any] = ['itm_score.fc.weight', 'itm_score.fc.bias'] for k in ignore_keys: state_dict.pop(lowerCAmelCase_ , lowerCAmelCase_ ) # load state dict into HuggingFace model 
model.eval() if mlm_model: _a , _a : Tuple = model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ ) assert missing_keys == ["mlm_score.decoder.bias"] else: model.load_state_dict(lowerCAmelCase_ ) # Define processor _a : Union[str, Any] = ViltImageProcessor(size=384 ) _a : Optional[int] = BertTokenizer.from_pretrained('bert-base-uncased' ) _a : Optional[Any] = ViltProcessor(lowerCAmelCase_ , lowerCAmelCase_ ) # Forward pass on example inputs (image + text) if nlvr_model: _a : List[str] = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=lowerCAmelCase_ ).raw ) _a : str = Image.open(requests.get('https://lil.nlp.cornell.edu/nlvr/exs/ex0_0.jpg' , stream=lowerCAmelCase_ ).raw ) _a : Union[str, Any] = ( 'The left image contains twice the number of dogs as the right image, and at least two dogs in total are' ' standing.' ) _a : Optional[Any] = processor(lowerCAmelCase_ , lowerCAmelCase_ , return_tensors='pt' ) _a : List[str] = processor(lowerCAmelCase_ , lowerCAmelCase_ , return_tensors='pt' ) _a : Dict = model( input_ids=encoding_a.input_ids , pixel_values=encoding_a.pixel_values , pixel_values_a=encoding_a.pixel_values , ) else: _a : Union[str, Any] = Image.open(requests.get('http://images.cocodataset.org/val2017/000000039769.jpg' , stream=lowerCAmelCase_ ).raw ) if mlm_model: _a : List[Any] = 'a bunch of [MASK] laying on a [MASK].' else: _a : Tuple = 'How many cats are there?' _a : Union[str, Any] = processor(lowerCAmelCase_ , lowerCAmelCase_ , return_tensors='pt' ) _a : Tuple = model(**lowerCAmelCase_ ) # Verify outputs if mlm_model: _a : str = torch.Size([1, 11, 30522] ) _a : Optional[Any] = torch.tensor([-12.5_061, -12.5_123, -12.5_174] ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , lowerCAmelCase_ , atol=1E-4 ) # verify masked token prediction equals "cats" _a : Any = outputs.logits[0, 4, :].argmax(-1 ).item() assert tokenizer.decode([predicted_id] ) == "cats" elif vqa_model: _a : Optional[Any] = torch.Size([1, 3129] ) _a : Dict = torch.tensor([-15.9_495, -18.1_472, -10.3_041] ) assert torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) assert outputs.logits.shape == expected_shape assert torch.allclose(outputs.logits[0, 0, :3] , lowerCAmelCase_ , atol=1E-4 ) # verify vqa prediction equals "2" _a : str = outputs.logits.argmax(-1 ).item() assert model.config.idalabel[predicted_idx] == "2" elif nlvr_model: _a : Union[str, Any] = torch.Size([1, 2] ) _a : List[Any] = torch.tensor([-2.8_721, 2.1_291] ) assert torch.allclose(outputs.logits[0, :3] , lowerCAmelCase_ , atol=1E-4 ) assert outputs.logits.shape == expected_shape Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ ) print(f"""Saving model and processor to {pytorch_dump_folder_path}""" ) model.save_pretrained(lowerCAmelCase_ ) processor.save_pretrained(lowerCAmelCase_ ) if __name__ == "__main__": __lowerCAmelCase = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint_url''', default='''https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt''', type=str, help='''URL of the checkpoint you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) __lowerCAmelCase = parser.parse_args() convert_vilt_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
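# Example invocation of the converter above (the function name follows the
# __main__ call at the end of the script; the dump folder is a placeholder):
convert_vilt_checkpoint(
    "https://github.com/dandelin/ViLT/releases/download/200k/vilt_200k_mlm_itm.ckpt",
    "./vilt-converted",
)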
107
'''simple docstring''' from __future__ import annotations import random # Maximum size of the population. Bigger could be faster but is more memory expensive. __lowerCAmelCase = 200 # Number of elements selected in every generation of evolution. The selection takes # place from best to worst of that generation and must be smaller than N_POPULATION. __lowerCAmelCase = 50 # Probability that an element of a generation can mutate, changing one of its genes. # This will guarantee that all genes will be used during evolution. __lowerCAmelCase = 0.4 # Just a seed to improve randomness required by the algorithm. random.seed(random.randint(0, 1_000)) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> tuple[str, float]: _a : List[Any] = len([g for position, g in enumerate(lowerCAmelCase_ ) if g == main_target[position]] ) return (item, float(lowerCAmelCase_ )) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> tuple[str, str]: _a : Dict = random.randint(0 , len(lowerCAmelCase_ ) - 1 ) _a : Optional[int] = parent_a[:random_slice] + parent_a[random_slice:] _a : Optional[int] = parent_a[:random_slice] + parent_a[random_slice:] return (child_a, child_a) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> str: _a : Optional[Any] = list(lowerCAmelCase_ ) if random.uniform(0 , 1 ) < MUTATION_PROBABILITY: _a : Optional[int] = random.choice(lowerCAmelCase_ ) return "".join(lowerCAmelCase_ ) def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> list[str]: _a : List[str] = [] # Generate more children proportionally to the fitness score. _a : Tuple = int(parent_a[1] * 100 ) + 1 _a : Tuple = 10 if child_n >= 10 else child_n for _ in range(lowerCAmelCase_ ): _a : Any = population_score[random.randint(0 , lowerCAmelCase_ )][0] _a , _a : Tuple = crossover(parent_a[0] , lowerCAmelCase_ ) # Append new string to the population list. pop.append(mutate(lowerCAmelCase_ , lowerCAmelCase_ ) ) pop.append(mutate(lowerCAmelCase_ , lowerCAmelCase_ ) ) return pop def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = True ) -> tuple[int, int, str]: # Verify if N_POPULATION is bigger than N_SELECTED if N_POPULATION < N_SELECTED: _a : Dict = f"""{N_POPULATION} must be bigger than {N_SELECTED}""" raise ValueError(lowerCAmelCase_ ) # Verify that the target contains no genes besides the ones inside genes variable. _a : Optional[int] = sorted({c for c in target if c not in genes} ) if not_in_genes_list: _a : List[Any] = f"""{not_in_genes_list} is not in genes list, evolution cannot converge""" raise ValueError(lowerCAmelCase_ ) # Generate random starting population. _a : Union[str, Any] = [] for _ in range(lowerCAmelCase_ ): population.append(''.join([random.choice(lowerCAmelCase_ ) for i in range(len(lowerCAmelCase_ ) )] ) ) # Just some logs to know what the algorithms is doing. _a , _a : Union[str, Any] = 0, 0 # This loop will end when we find a perfect match for our target. while True: generation += 1 total_population += len(lowerCAmelCase_ ) # Random population created. Now it's time to evaluate. # Adding a bit of concurrency can make everything faster, # # import concurrent.futures # population_score: list[tuple[str, float]] = [] # with concurrent.futures.ThreadPoolExecutor( # max_workers=NUM_WORKERS) as executor: # futures = {executor.submit(evaluate, item) for item in population} # concurrent.futures.wait(futures) # population_score = [item.result() for item in futures] # # but with a simple algorithm like this, it will probably be slower. 
# We just need to call evaluate for every item inside the population. _a : Optional[Any] = [evaluate(lowerCAmelCase_ , lowerCAmelCase_ ) for item in population] # Check if there is a matching evolution. _a : Tuple = sorted(lowerCAmelCase_ , key=lambda lowerCAmelCase_ : x[1] , reverse=lowerCAmelCase_ ) if population_score[0][0] == target: return (generation, total_population, population_score[0][0]) # Print the best result every 10 generation. # Just to know that the algorithm is working. if debug and generation % 10 == 0: print( f"""\nGeneration: {generation}""" f"""\nTotal Population:{total_population}""" f"""\nBest score: {population_score[0][1]}""" f"""\nBest string: {population_score[0][0]}""" ) # Flush the old population, keeping some of the best evolutions. # Keeping this avoid regression of evolution. _a : Dict = population[: int(N_POPULATION / 3 )] population.clear() population.extend(lowerCAmelCase_ ) # Normalize population score to be between 0 and 1. _a : Tuple = [ (item, score / len(lowerCAmelCase_ )) for item, score in population_score ] # This is selection for i in range(lowerCAmelCase_ ): population.extend(select(population_score[int(lowerCAmelCase_ )] , lowerCAmelCase_ , lowerCAmelCase_ ) ) # Check if the population has already reached the maximum value and if so, # break the cycle. If this check is disabled, the algorithm will take # forever to compute large strings, but will also calculate small strings in # a far fewer generations. if len(lowerCAmelCase_ ) > N_POPULATION: break if __name__ == "__main__": __lowerCAmelCase = ( '''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!''' ) __lowerCAmelCase = list( ''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm''' '''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\''' ) __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = basic(target_str, genes_list) print( f"""\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}""" )
107
1
class Things:
    def __init__(self, name, value, weight):
        self.name = name
        self.value = value
        self.weight = weight

    def __repr__(self):
        return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''

    def get_value(self):
        return self.value

    def get_name(self):
        return self.name

    def get_weight(self):
        return self.weight

    def value_weight(self):
        return self.value / self.weight


def build_menu(name, value, weight):
    """simple docstring"""
    menu = []
    for i in range(len(value)):
        menu.append(Things(name[i], value[i], weight[i]))
    return menu


def greedy(item, max_cost, key_func):
    """simple docstring"""
    items_copy = sorted(item, key=key_func, reverse=True)
    result = []
    total_value, total_cost = 0.0, 0.0
    for i in range(len(items_copy)):
        if (total_cost + items_copy[i].get_weight()) <= max_cost:
            result.append(items_copy[i])
            total_cost += items_copy[i].get_weight()
            total_value += items_copy[i].get_value()
    return (result, total_value)


def test_greedy():
    """simple docstring"""
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
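# A brief usage sketch (the menu data is made up for illustration): sort by
# value-per-weight and fill greedily up to the cost budget.
food = ["Burger", "Pizza", "Coca Cola", "Rice"]
value = [80, 100, 60, 70]
weight = [40, 10, 20, 70]
foods = build_menu(food, value, weight)
print(greedy(foods, 60, Things.value_weight))
# -> ([Things(Pizza, 100, 10), Things(Coca Cola, 60, 20)], 160.0)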
322
'''simple docstring''' # This script creates a super tiny model that is useful inside tests, when we just want to test that # the machinery works, without needing to the check the quality of the outcomes. # # This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny - # all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and # emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files. # The latter is done by `fsmt-make-super-tiny-model.py`. # # It will be used then as "stas/tiny-wmt19-en-ru" from pathlib import Path import json import tempfile from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES _A : Dict ='''tiny-wmt19-en-ru''' # Build # borrowed from a test _A : List[str] =[ '''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''w</w>''', '''r</w>''', '''t</w>''', '''lo''', '''low''', '''er</w>''', '''low</w>''', '''lowest</w>''', '''newer</w>''', '''wider</w>''', '''<unk>''', ] _A : str =dict(zip(vocab, range(len(vocab)))) _A : List[str] =['''l o 123''', '''lo w 1456''', '''e r</w> 1789''', ''''''] with tempfile.TemporaryDirectory() as tmpdirname: _A : Union[str, Any] =Path(tmpdirname) _A : str =build_dir / VOCAB_FILES_NAMES['''src_vocab_file'''] _A : int =build_dir / VOCAB_FILES_NAMES['''tgt_vocab_file'''] _A : List[Any] =build_dir / VOCAB_FILES_NAMES['''merges_file'''] with open(src_vocab_file, '''w''') as fp: fp.write(json.dumps(vocab_tokens)) with open(tgt_vocab_file, '''w''') as fp: fp.write(json.dumps(vocab_tokens)) with open(merges_file, '''w''') as fp: fp.write('''\n'''.join(merges)) _A : int =FSMTTokenizer( langs=['''en''', '''ru'''], src_vocab_size=len(vocab), tgt_vocab_size=len(vocab), src_vocab_file=src_vocab_file, tgt_vocab_file=tgt_vocab_file, merges_file=merges_file, ) _A : List[str] =FSMTConfig( langs=['''ru''', '''en'''], src_vocab_size=1_000, tgt_vocab_size=1_000, d_model=4, encoder_layers=1, decoder_layers=1, encoder_ffn_dim=4, decoder_ffn_dim=4, encoder_attention_heads=1, decoder_attention_heads=1, ) _A : Union[str, Any] =FSMTForConditionalGeneration(config) print(F'num of params {tiny_model.num_parameters()}') # Test _A : List[str] =tokenizer(['''Making tiny model'''], return_tensors='''pt''') _A : Tuple =tiny_model(**batch) print('''test output:''', len(outputs.logits[0])) # Save tiny_model.half() # makes it smaller tiny_model.save_pretrained(mname_tiny) tokenizer.save_pretrained(mname_tiny) print(F'Generated {mname_tiny}') # Upload # transformers-cli upload tiny-wmt19-en-ru
41
0
"""simple docstring""" import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoImageProcessor, ViTImageProcessor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_image_processing import CustomImageProcessor # noqa E402 A_ = get_tests_dir('''fixtures''') class lowercase( unittest.TestCase ): '''simple docstring''' def UpperCamelCase_ ( self: Optional[Any] ): '''simple docstring''' _snake_case : Dict = mock.Mock() _snake_case : List[str] = 500 _snake_case : Dict = {} _snake_case : Optional[Any] = HTTPError _snake_case : List[Any] = {} # Download this model to make sure it's in the cache. _snake_case : int = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch("""requests.Session.request""", return_value=a_ ) as mock_head: _snake_case : Dict = ViTImageProcessor.from_pretrained("""hf-internal-testing/tiny-random-vit""" ) # This check we did call the fake head request mock_head.assert_called() def UpperCamelCase_ ( self: Any ): '''simple docstring''' _snake_case : List[Any] = ViTImageProcessor.from_pretrained( """https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json""" ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' with self.assertRaises(a_ ): # config is in subfolder, the following should not work without specifying the subfolder _snake_case : Union[str, Any] = AutoImageProcessor.from_pretrained("""hf-internal-testing/stable-diffusion-all-variants""" ) _snake_case : Optional[Any] = AutoImageProcessor.from_pretrained( """hf-internal-testing/stable-diffusion-all-variants""", subfolder="""feature_extractor""" ) self.assertIsNotNone(a_ ) @is_staging_test class lowercase( unittest.TestCase ): '''simple docstring''' @classmethod def UpperCamelCase_ ( cls: int ): '''simple docstring''' _snake_case : List[Any] = TOKEN HfFolder.save_token(a_ ) @classmethod def UpperCamelCase_ ( cls: Tuple ): '''simple docstring''' try: delete_repo(token=cls._token, repo_id="""test-image-processor""" ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id="""valid_org/test-image-processor-org""" ) except HTTPError: pass try: delete_repo(token=cls._token, repo_id="""test-dynamic-image-processor""" ) except HTTPError: pass def UpperCamelCase_ ( self: int ): '''simple docstring''' _snake_case : Any = ViTImageProcessor.from_pretrained(a_ ) image_processor.push_to_hub("""test-image-processor""", use_auth_token=self._token ) _snake_case : int = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(a_, getattr(a_, a_ ) ) # Reset repo delete_repo(token=self._token, repo_id="""test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( a_, repo_id="""test-image-processor""", push_to_hub=a_, use_auth_token=self._token ) _snake_case : Any = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor" ) for k, v in image_processor.__dict__.items(): self.assertEqual(a_, getattr(a_, a_ ) ) def UpperCamelCase_ ( self: Dict ): '''simple docstring''' _snake_case : int = ViTImageProcessor.from_pretrained(a_ ) 
image_processor.push_to_hub("""valid_org/test-image-processor""", use_auth_token=self._token ) _snake_case : Tuple = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(a_, getattr(a_, a_ ) ) # Reset repo delete_repo(token=self._token, repo_id="""valid_org/test-image-processor""" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: image_processor.save_pretrained( a_, repo_id="""valid_org/test-image-processor-org""", push_to_hub=a_, use_auth_token=self._token ) _snake_case : Tuple = ViTImageProcessor.from_pretrained("""valid_org/test-image-processor-org""" ) for k, v in image_processor.__dict__.items(): self.assertEqual(a_, getattr(a_, a_ ) ) def UpperCamelCase_ ( self: int ): '''simple docstring''' CustomImageProcessor.register_for_auto_class() _snake_case : int = CustomImageProcessor.from_pretrained(a_ ) image_processor.push_to_hub("""test-dynamic-image-processor""", use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( image_processor.auto_map, {"""AutoImageProcessor""": """custom_image_processing.CustomImageProcessor"""}, ) _snake_case : Optional[Any] = AutoImageProcessor.from_pretrained( f"{USER}/test-dynamic-image-processor", trust_remote_code=a_ ) # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module self.assertEqual(new_image_processor.__class__.__name__, """CustomImageProcessor""" )
132
"""simple docstring"""
from typing import Any


class Node:
    """simple docstring"""

    def __init__(self, data: Any):
        """simple docstring"""
        self.data = data
        self.next = None


class LinkedList:
    """simple docstring"""

    def __init__(self):
        """simple docstring"""
        self.head = None

    def print_list(self):
        """simple docstring"""
        temp = self.head
        while temp is not None:
            print(temp.data, end=" ")
            temp = temp.next
        print()

    def push(self, new_data: Any):
        """simple docstring"""
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    def swap_nodes(self, node_data_1, node_data_2):
        """simple docstring"""
        if node_data_1 == node_data_2:
            return
        node_1 = self.head
        while node_1 is not None and node_1.data != node_data_1:
            node_1 = node_1.next
        node_2 = self.head
        while node_2 is not None and node_2.data != node_data_2:
            node_2 = node_2.next
        if node_1 is None or node_2 is None:
            return
        node_1.data, node_2.data = node_2.data, node_1.data


if __name__ == "__main__":
    ll = LinkedList()
    for i in range(5, 0, -1):
        ll.push(i)
    ll.print_list()
    ll.swap_nodes(1, 4)
    print('''After swapping''')
    ll.print_list()
132
1
"""simple docstring"""
import copy
import os

import cv2 as cva  # the original alias `cva` is kept; `import cva` alone is not a real module
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    '''simple docstring'''

    def __init__(self):
        self.img = ''
        self.original_image = ''
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cva.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label='x')
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            self.rem = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(self.rem)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite('output_data/output.jpg', self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cva.imshow('Output-Image', self.img)
        cva.imshow('Input-Image', self.original_image)
        cva.waitKey(5000)
        cva.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), """image_data/input.jpg""")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
96
from typing import List, Union from ..utils import ( add_end_docstrings, is_tf_available, is_torch_available, is_vision_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, Pipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_tf_available(): import tensorflow as tf from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING from ..tf_utils import stable_softmax if is_torch_available(): from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING lowerCamelCase__ = logging.get_logger(__name__) @add_end_docstrings(__magic_name__ ) class A__ ( __magic_name__ ): def __init__( self : int , *a : Dict , **a : Union[str, Any] ): '''simple docstring''' super().__init__(*a , **a ) requires_backends(self , 'vision' ) self.check_model_type( TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING if self.framework == 'tf' else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ) def _lowerCamelCase ( self : Dict , a : List[str]=None ): '''simple docstring''' lowerCAmelCase__ : Any = {} if top_k is not None: lowerCAmelCase__ : Tuple = top_k return {}, {}, postprocess_params def __call__( self : Any , a : Union[str, List[str], "Image.Image", List["Image.Image"]] , **a : List[Any] ): '''simple docstring''' return super().__call__(a , **a ) def _lowerCamelCase ( self : Any , a : Optional[Any] ): '''simple docstring''' lowerCAmelCase__ : List[Any] = load_image(a ) lowerCAmelCase__ : Optional[int] = self.image_processor(images=a , return_tensors=self.framework ) return model_inputs def _lowerCamelCase ( self : Optional[int] , a : List[str] ): '''simple docstring''' lowerCAmelCase__ : Optional[Any] = self.model(**a ) return model_outputs def _lowerCamelCase ( self : Optional[Any] , a : List[Any] , a : List[Any]=5 ): '''simple docstring''' if top_k > self.model.config.num_labels: lowerCAmelCase__ : Optional[int] = self.model.config.num_labels if self.framework == "pt": lowerCAmelCase__ : List[Any] = model_outputs.logits.softmax(-1 )[0] lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = probs.topk(a ) elif self.framework == "tf": lowerCAmelCase__ : Any = stable_softmax(model_outputs.logits , axis=-1 )[0] lowerCAmelCase__ : Any = tf.math.top_k(a , k=a ) lowerCAmelCase__ , lowerCAmelCase__ : int = topk.values.numpy(), topk.indices.numpy() else: raise ValueError(f'''Unsupported framework: {self.framework}''' ) lowerCAmelCase__ : List[Any] = scores.tolist() lowerCAmelCase__ : List[str] = ids.tolist() return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(a , a )]
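# A hedged usage sketch via the high-level factory (standard transformers API;
# the checkpoint name is illustrative):
from transformers import pipeline

classifier = pipeline("image-classification", model="google/vit-base-patch16-224")
preds = classifier("http://images.cocodataset.org/val2017/000000039769.jpg", top_k=3)
print(preds)  # [{"score": ..., "label": ...}, ...]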
212
0
"""simple docstring"""
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse('''0.11.0''').release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type='''dataset''', revision=revision)
128
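# Usage sketch for hf_hub_url above (the repo and file names are illustrative):
url = hf_hub_url("squad", "plain_text/train.parquet", revision="main")
print(url)  # https://huggingface.co/datasets/squad/resolve/main/plain_text/train.parquet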
"""simple docstring""" import argparse import os import re import packaging.version UpperCamelCase_ ="""examples/""" UpperCamelCase_ ={ """examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""), """init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""), """setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""), """doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""), } UpperCamelCase_ ={ """init""": """src/transformers/__init__.py""", """setup""": """setup.py""", } UpperCamelCase_ ="""README.md""" def a_ ( _lowercase , _lowercase , _lowercase ): with open(_lowercase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: _UpperCamelCase : Tuple = f.read() _UpperCamelCase , _UpperCamelCase : List[Any] = REPLACE_PATTERNS[pattern] _UpperCamelCase : Optional[Any] = replace.replace('''VERSION''' , _lowercase ) _UpperCamelCase : List[Any] = re_pattern.sub(_lowercase , _lowercase ) with open(_lowercase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.write(_lowercase ) def a_ ( _lowercase ): for folder, directories, fnames in os.walk(_lowercase ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove('''research_projects''' ) if "legacy" in directories: directories.remove('''legacy''' ) for fname in fnames: if fname.endswith('''.py''' ): update_version_in_file(os.path.join(_lowercase , _lowercase ) , _lowercase , pattern='''examples''' ) def a_ ( _lowercase , _lowercase=False ): for pattern, fname in REPLACE_FILES.items(): update_version_in_file(_lowercase , _lowercase , _lowercase ) if not patch: update_version_in_examples(_lowercase ) def a_ ( ): _UpperCamelCase : Any = '''🤗 Transformers currently provides the following architectures''' _UpperCamelCase : List[str] = '''1. Want to contribute a new model?''' with open(_lowercase , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f: _UpperCamelCase : List[Any] = f.readlines() # Find the start of the list. _UpperCamelCase : Optional[Any] = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 _UpperCamelCase : Any = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith('''1.''' ): _UpperCamelCase : Tuple = lines[index].replace( '''https://huggingface.co/docs/transformers/main/model_doc''' , '''https://huggingface.co/docs/transformers/model_doc''' , ) index += 1 with open(_lowercase , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f: f.writelines(_lowercase ) def a_ ( ): with open(REPLACE_FILES['''init'''] , '''r''' ) as f: _UpperCamelCase : List[Any] = f.read() _UpperCamelCase : List[Any] = REPLACE_PATTERNS['''init'''][0].search(_lowercase ).groups()[0] return packaging.version.parse(_lowercase ) def a_ ( _lowercase=False ): _UpperCamelCase : Union[str, Any] = get_version() if patch and default_version.is_devrelease: raise ValueError('''Can\'t create a patch version from the dev branch, checkout a released version!''' ) if default_version.is_devrelease: _UpperCamelCase : List[str] = default_version.base_version elif patch: _UpperCamelCase : Union[str, Any] = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: _UpperCamelCase : str = F"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. _UpperCamelCase : Optional[int] = input(F"""Which version are you releasing? [{default_version}]""" ) if len(_lowercase ) == 0: _UpperCamelCase : str = default_version print(F"""Updating version to {version}.""" ) global_version_update(_lowercase , patch=_lowercase ) if not patch: print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() def a_ ( ): _UpperCamelCase : Any = get_version() _UpperCamelCase : Dict = F"""{current_version.major}.{current_version.minor + 1}.0.dev0""" _UpperCamelCase : Union[str, Any] = current_version.base_version # Check with the user we got that right. _UpperCamelCase : int = input(F"""Which version are we developing now? [{dev_version}]""" ) if len(_lowercase ) == 0: _UpperCamelCase : List[str] = dev_version print(F"""Updating version to {version}.""" ) global_version_update(_lowercase ) print('''Cleaning main README, don\'t forget to run `make fix-copies`.''' ) clean_main_ref_in_model_list() if __name__ == "__main__": UpperCamelCase_ =argparse.ArgumentParser() parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""") parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""") UpperCamelCase_ =parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print("""Nothing to do after a patch :-)""") else: post_release_work()
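# Self-contained illustration of the version-bump substitution driving the
# script above: the "init" pattern rewrites a __version__ line in place (the
# source string here is a toy example, not a real file).
import re

re_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
code = 'import x\n__version__ = "4.30.0.dev0"\n'
print(re_pattern.sub('__version__ = "4.31.0"', code))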
128
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCamelCase_ : Any = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ : str = [ 'SEW_PRETRAINED_MODEL_ARCHIVE_LIST', 'SEWForCTC', 'SEWForSequenceClassification', 'SEWModel', 'SEWPreTrainedModel', ] if TYPE_CHECKING: from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_sew import ( SEW_PRETRAINED_MODEL_ARCHIVE_LIST, SEWForCTC, SEWForSequenceClassification, SEWModel, SEWPreTrainedModel, ) else: import sys lowerCamelCase_ : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
286
"""simple docstring""" import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser lowerCamelCase_ : Any = re.compile(r'\s+') def UpperCAmelCase__ ( _UpperCAmelCase ): """simple docstring""" return {"hash": hashlib.mda(re.sub(_UpperCAmelCase , '' , example['content'] ).encode('utf-8' ) ).hexdigest()} def UpperCAmelCase__ ( _UpperCAmelCase ): """simple docstring""" A_ : List[str] = [len(_UpperCAmelCase ) for line in example['content'].splitlines()] return {"line_mean": np.mean(_UpperCAmelCase ), "line_max": max(_UpperCAmelCase )} def UpperCAmelCase__ ( _UpperCAmelCase ): """simple docstring""" A_ : Any = np.mean([c.isalnum() for c in example['content']] ) return {"alpha_frac": alpha_frac} def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" if example["hash"] in uniques: uniques.remove(example['hash'] ) return True else: return False def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase=5 ): """simple docstring""" A_ : Optional[int] = ['auto-generated', 'autogenerated', 'automatically generated'] A_ : List[str] = example['content'].splitlines() for _, line in zip(range(_UpperCAmelCase ) , _UpperCAmelCase ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase=5 , _UpperCAmelCase=0.05 ): """simple docstring""" A_ : Any = ['unit tests', 'test file', 'configuration file'] A_ : Dict = example['content'].splitlines() A_ : List[Any] = 0 A_ : str = 0 # first test for _, line in zip(range(_UpperCAmelCase ) , _UpperCAmelCase ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test A_ : Tuple = example['content'].count('\n' ) A_ : Tuple = int(coeff * nlines ) for line in lines: count_config += line.lower().count('config' ) count_test += line.lower().count('test' ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def UpperCAmelCase__ ( _UpperCAmelCase ): """simple docstring""" A_ : List[Any] = ['def ', 'class ', 'for ', 'while '] A_ : Tuple = example['content'].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase=4 ): """simple docstring""" A_ : Union[str, Any] = example['content'].splitlines() A_ : Any = 0 for line in lines: counter += line.lower().count('=' ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def UpperCAmelCase__ ( _UpperCAmelCase ): """simple docstring""" A_ : Optional[Any] = tokenizer(example['content'] , truncation=_UpperCAmelCase )['input_ids'] A_ : Dict = len(example['content'] ) / len(_UpperCAmelCase ) return {"ratio": ratio} def UpperCAmelCase__ ( _UpperCAmelCase ): """simple docstring""" A_ : Any = {} results.update(get_hash(_UpperCAmelCase ) ) results.update(line_stats(_UpperCAmelCase ) ) results.update(alpha_stats(_UpperCAmelCase ) ) results.update(char_token_ratio(_UpperCAmelCase ) ) results.update(is_autogenerated(_UpperCAmelCase ) ) results.update(is_config_or_test(_UpperCAmelCase ) ) 
results.update(has_no_keywords(_UpperCAmelCase ) ) results.update(has_few_assignments(_UpperCAmelCase ) ) return results def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ): """simple docstring""" if not check_uniques(_UpperCAmelCase , _UpperCAmelCase ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def UpperCAmelCase__ ( _UpperCAmelCase ): """simple docstring""" with open(_UpperCAmelCase , 'rb' ) as f_in: with gzip.open(str(_UpperCAmelCase ) + '.gz' , 'wb' , compresslevel=6 ) as f_out: shutil.copyfileobj(_UpperCAmelCase , _UpperCAmelCase ) os.unlink(_UpperCAmelCase ) # Settings lowerCamelCase_ : Optional[int] = HfArgumentParser(PreprocessingArguments) lowerCamelCase_ : Optional[Any] = parser.parse_args() if args.num_workers is None: lowerCamelCase_ : int = multiprocessing.cpu_count() lowerCamelCase_ : Tuple = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset lowerCamelCase_ : Tuple = time.time() lowerCamelCase_ : Tuple = load_dataset(args.dataset_name, split='train') print(F"Time to load dataset: {time.time()-t_start:.2f}") # Run preprocessing lowerCamelCase_ : List[str] = time.time() lowerCamelCase_ : Optional[int] = ds.map(preprocess, num_proc=args.num_workers) print(F"Time to preprocess dataset: {time.time()-t_start:.2f}") # Deduplicate hashes lowerCamelCase_ : int = set(ds.unique('hash')) lowerCamelCase_ : Union[str, Any] = len(uniques) / len(ds) print(F"Fraction of duplicates: {1-frac:.2%}") # Deduplicate data and apply heuristics lowerCamelCase_ : Optional[int] = time.time() lowerCamelCase_ : Tuple = ds.filter(filter, fn_kwargs={'uniques': uniques, 'args': args}) print(F"Time to filter dataset: {time.time()-t_start:.2f}") print(F"Size of filtered dataset: {len(ds_filter)}") # Deduplicate with minhash and jaccard similarity if args.near_deduplication: lowerCamelCase_ : Union[str, Any] = time.time() lowerCamelCase_ , lowerCamelCase_ : str = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}") print(F"Size of deduplicate dataset: {len(ds_filter)}") # Save data in batches of samples_per_file lowerCamelCase_ : Tuple = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / 'duplicate_clusters.json', 'w') as f: json.dump(duplicate_clusters, f) lowerCamelCase_ : Optional[Any] = output_dir / 'data' data_dir.mkdir(exist_ok=True) lowerCamelCase_ : List[str] = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): lowerCamelCase_ : Optional[int] = str(data_dir / F"file-{file_number+1:012}.json") lowerCamelCase_ : List[str] = min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(F"Time to save dataset: {time.time()-t_start:.2f}")
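# Toy check of two of the filtering heuristics above, the alphanumeric
# fraction and the line-length statistics (the content string is illustrative).
import numpy as np

content = "def add(a, b):\n    return a + b\n"
line_lengths = [len(line) for line in content.splitlines()]
print({
    "alpha_frac": float(np.mean([c.isalnum() for c in content])),
    "line_mean": float(np.mean(line_lengths)),
    "line_max": max(line_lengths),
})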
286
1
import contextlib import os import sqlitea import pytest from datasets import Dataset, Features, Value from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict: '''simple docstring''' assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @require_sqlalchemy @pytest.mark.parametrize('keep_in_memory' , [False, True] ) def _snake_case( SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : str ) -> Dict: '''simple docstring''' A__ = tmp_path / 'cache' A__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): A__ = SqlDatasetReader( 'dataset' , 'sqlite:///' + sqlite_path , cache_dir=SCREAMING_SNAKE_CASE__ , keep_in_memory=SCREAMING_SNAKE_CASE__ ).read() _check_sql_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) @require_sqlalchemy @pytest.mark.parametrize( 'features' , [ None, {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}, {'col_1': 'string', 'col_2': 'string', 'col_3': 'string'}, {'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'}, {'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'}, ] , ) def _snake_case( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Any ) -> Union[str, Any]: '''simple docstring''' A__ = tmp_path / 'cache' A__ = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'} A__ = features.copy() if features else default_expected_features A__ = ( Features({feature: Value(SCREAMING_SNAKE_CASE__ ) for feature, dtype in features.items()} ) if features is not None else None ) A__ = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , features=SCREAMING_SNAKE_CASE__ , cache_dir=SCREAMING_SNAKE_CASE__ ).read() _check_sql_dataset(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) def _snake_case( SCREAMING_SNAKE_CASE__ : List[Any] ) -> Optional[int]: '''simple docstring''' with contextlib.closing(sqlitea.connect(SCREAMING_SNAKE_CASE__ ) ) as con: A__ = con.cursor() cur.execute('SELECT * FROM dataset' ) for row in cur: yield row @require_sqlalchemy def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Dict: '''simple docstring''' A__ = tmp_path / 'cache' A__ = os.path.join(SCREAMING_SNAKE_CASE__ , 'tmp.sql' ) A__ = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=SCREAMING_SNAKE_CASE__ ).read() SqlDatasetWriter(SCREAMING_SNAKE_CASE__ , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=1 ).write() A__ = iter_sql_file(SCREAMING_SNAKE_CASE__ ) A__ = iter_sql_file(SCREAMING_SNAKE_CASE__ ) for rowa, rowa in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): assert rowa == rowa @require_sqlalchemy def _snake_case( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Dict ) -> List[str]: '''simple docstring''' A__ = tmp_path / 'cache' A__ = os.path.join(SCREAMING_SNAKE_CASE__ , 'tmp.sql' ) A__ = 
SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=SCREAMING_SNAKE_CASE__ ).read() SqlDatasetWriter(SCREAMING_SNAKE_CASE__ , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=2 ).write() A__ = iter_sql_file(SCREAMING_SNAKE_CASE__ ) A__ = iter_sql_file(SCREAMING_SNAKE_CASE__ ) for rowa, rowa in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ): assert rowa == rowa @require_sqlalchemy def _snake_case( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple: '''simple docstring''' A__ = tmp_path / 'cache' A__ = os.path.join(SCREAMING_SNAKE_CASE__ , 'tmp.sql' ) A__ = SqlDatasetReader('dataset' , 'sqlite:///' + sqlite_path , cache_dir=SCREAMING_SNAKE_CASE__ ).read() with pytest.raises(SCREAMING_SNAKE_CASE__ ): SqlDatasetWriter(SCREAMING_SNAKE_CASE__ , 'dataset' , 'sqlite:///' + output_sqlite_path , num_proc=0 ).write()
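# Sketch of a sqlite fixture matching the schema these tests expect; note the
# standard-library module is `sqlite3` (the file name below is illustrative).
import sqlite3

con = sqlite3.connect("dataset.sqlite")
con.execute("CREATE TABLE dataset (col_1 TEXT, col_2 INTEGER, col_3 REAL)")
con.executemany(
    "INSERT INTO dataset VALUES (?, ?, ?)",
    [(str(i), i, float(i)) for i in range(4)],
)
con.commit()
con.close()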
355
import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_torch_available from transformers.testing_utils import require_torch, torch_device if is_torch_available(): from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments @require_torch class A ( unittest.TestCase ): """simple docstring""" def snake_case__ ( self : List[Any],lowercase_ : str )-> List[Any]: '''simple docstring''' for model_result in results.values(): for batch_size, sequence_length in zip(model_result['bs'],model_result['ss'] ): A__ = model_result['result'][batch_size][sequence_length] self.assertIsNotNone(lowercase_ ) def snake_case__ ( self : Dict )-> List[str]: '''simple docstring''' A__ = 'sshleifer/tiny-gpt2' A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_ ) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case__ ( self : Dict )-> List[str]: '''simple docstring''' A__ = 'sgugger/tiny-distilbert-classification' A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,only_pretrain_model=lowercase_,) A__ = PyTorchBenchmark(lowercase_ ) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case__ ( self : List[Any] )-> Any: '''simple docstring''' A__ = 'sshleifer/tiny-gpt2' A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,torchscript=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_ ) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) @unittest.skipIf(torch_device == 'cpu','Cant do half precision' ) def snake_case__ ( self : Any )-> Dict: '''simple docstring''' A__ = 'sshleifer/tiny-gpt2' A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,fpaa=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_ ) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case__ ( self : Any )-> Optional[Any]: '''simple docstring''' A__ = 'sshleifer/tiny-gpt2' A__ = AutoConfig.from_pretrained(lowercase_ ) # set architectures equal to `None` A__ = None A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_,configs=[config] ) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case__ ( self : Union[str, Any] )-> int: '''simple docstring''' A__ = 'sshleifer/tiny-gpt2' A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_ ) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) 
self.check_results_dict_not_empty(results.memory_train_result ) @unittest.skipIf(torch_device == 'cpu','Can\'t do half precision' ) def snake_case__ ( self : List[Any] )-> Dict: '''simple docstring''' A__ = 'sshleifer/tiny-gpt2' A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],fpaa=lowercase_,multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_ ) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def snake_case__ ( self : int )-> Optional[int]: '''simple docstring''' A__ = 'sshleifer/tiny-gpt2' A__ = AutoConfig.from_pretrained(lowercase_ ) A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_,configs=[config] ) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case__ ( self : List[Any] )-> Any: '''simple docstring''' A__ = 'sshleifer/tinier_bart' A__ = AutoConfig.from_pretrained(lowercase_ ) A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_,configs=[config] ) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result ) self.check_results_dict_not_empty(results.memory_inference_result ) def snake_case__ ( self : List[str] )-> List[str]: '''simple docstring''' A__ = 'sshleifer/tiny-gpt2' A__ = AutoConfig.from_pretrained(lowercase_ ) A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_,configs=[config] ) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def snake_case__ ( self : int )-> Union[str, Any]: '''simple docstring''' A__ = 'sshleifer/tinier_bart' A__ = AutoConfig.from_pretrained(lowercase_ ) A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_,configs=[config] ) A__ = benchmark.run() self.check_results_dict_not_empty(results.time_train_result ) self.check_results_dict_not_empty(results.memory_train_result ) def snake_case__ ( self : Optional[Any] )-> Tuple: '''simple docstring''' A__ = 'sshleifer/tiny-gpt2' with tempfile.TemporaryDirectory() as tmp_dir: A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,save_to_csv=lowercase_,sequence_lengths=[8],batch_sizes=[1],inference_time_csv_file=os.path.join(lowercase_,'inf_time.csv' ),train_memory_csv_file=os.path.join(lowercase_,'train_mem.csv' ),inference_memory_csv_file=os.path.join(lowercase_,'inf_mem.csv' ),train_time_csv_file=os.path.join(lowercase_,'train_time.csv' ),env_info_csv_file=os.path.join(lowercase_,'env.csv' ),multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_ ) benchmark.run() self.assertTrue(Path(os.path.join(lowercase_,'inf_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase_,'train_time.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase_,'inf_mem.csv' ) ).exists() ) 
self.assertTrue(Path(os.path.join(lowercase_,'train_mem.csv' ) ).exists() ) self.assertTrue(Path(os.path.join(lowercase_,'env.csv' ) ).exists() ) def snake_case__ ( self : Tuple )-> str: '''simple docstring''' A__ = 'sshleifer/tiny-gpt2' def _check_summary_is_not_empty(lowercase_ : Optional[Any] ): self.assertTrue(hasattr(lowercase_,'sequential' ) ) self.assertTrue(hasattr(lowercase_,'cumulative' ) ) self.assertTrue(hasattr(lowercase_,'current' ) ) self.assertTrue(hasattr(lowercase_,'total' ) ) with tempfile.TemporaryDirectory() as tmp_dir: A__ = PyTorchBenchmarkArguments( models=[MODEL_ID],training=lowercase_,inference=lowercase_,sequence_lengths=[8],batch_sizes=[1],log_filename=os.path.join(lowercase_,'log.txt' ),log_print=lowercase_,trace_memory_line_by_line=lowercase_,multi_process=lowercase_,) A__ = PyTorchBenchmark(lowercase_ ) A__ = benchmark.run() _check_summary_is_not_empty(result.inference_summary ) _check_summary_is_not_empty(result.train_summary ) self.assertTrue(Path(os.path.join(lowercase_,'log.txt' ) ).exists() )
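# Standalone run mirroring the inference tests above, using the same tiny
# checkpoint and benchmark arguments the tests pass (requires torch).
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments

benchmark_args = PyTorchBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
results = PyTorchBenchmark(benchmark_args).run()
print(results.time_inference_result)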
282
0
'''simple docstring''' import unittest from transformers import AutoTokenizer, NystromformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, NystromformerModel, ) from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST class _a : def __init__( self : Optional[Any] , lowercase : int , lowercase : str=13 , lowercase : Any=7 , lowercase : str=True , lowercase : int=True , lowercase : int=True , lowercase : Any=True , lowercase : Any=99 , lowercase : Any=32 , lowercase : Dict=5 , lowercase : Optional[int]=4 , lowercase : Dict=37 , lowercase : int="gelu" , lowercase : Union[str, Any]=0.1 , lowercase : Union[str, Any]=0.1 , lowercase : str=512 , lowercase : Tuple=16 , lowercase : List[str]=2 , lowercase : str=0.02 , lowercase : str=3 , lowercase : Dict=4 , lowercase : int=None , ): '''simple docstring''' UpperCAmelCase = parent UpperCAmelCase = batch_size UpperCAmelCase = seq_length UpperCAmelCase = is_training UpperCAmelCase = use_input_mask UpperCAmelCase = use_token_type_ids UpperCAmelCase = use_labels UpperCAmelCase = vocab_size UpperCAmelCase = hidden_size UpperCAmelCase = num_hidden_layers UpperCAmelCase = num_attention_heads UpperCAmelCase = intermediate_size UpperCAmelCase = hidden_act UpperCAmelCase = hidden_dropout_prob UpperCAmelCase = attention_probs_dropout_prob UpperCAmelCase = max_position_embeddings UpperCAmelCase = type_vocab_size UpperCAmelCase = type_sequence_label_size UpperCAmelCase = initializer_range UpperCAmelCase = num_labels UpperCAmelCase = num_choices UpperCAmelCase = scope def A ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase = None if self.use_input_mask: UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase = None if self.use_token_type_ids: UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase = None UpperCAmelCase = None UpperCAmelCase = None if self.use_labels: UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def A ( self : Tuple ): '''simple docstring''' return NystromformerConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowercase , initializer_range=self.initializer_range , ) def A ( self : Optional[Any] , lowercase : Union[str, Any] , lowercase : 
Optional[Any] , lowercase : Tuple , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : List[Any] , lowercase : Optional[Any] ): '''simple docstring''' UpperCAmelCase = NystromformerModel(config=lowercase ) model.to(lowercase ) model.eval() UpperCAmelCase = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase ) UpperCAmelCase = model(lowercase , token_type_ids=lowercase ) UpperCAmelCase = model(lowercase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def A ( self : Union[str, Any] , lowercase : Optional[int] , lowercase : Any , lowercase : str , lowercase : int , lowercase : int , lowercase : Dict , lowercase : int ): '''simple docstring''' UpperCAmelCase = NystromformerForMaskedLM(config=lowercase ) model.to(lowercase ) model.eval() UpperCAmelCase = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def A ( self : Union[str, Any] , lowercase : Optional[Any] , lowercase : Dict , lowercase : Tuple , lowercase : int , lowercase : Optional[Any] , lowercase : List[Any] , lowercase : Union[str, Any] ): '''simple docstring''' UpperCAmelCase = NystromformerForQuestionAnswering(config=lowercase ) model.to(lowercase ) model.eval() UpperCAmelCase = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , start_positions=lowercase , end_positions=lowercase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def A ( self : List[str] , lowercase : Optional[Any] , lowercase : List[str] , lowercase : Optional[Any] , lowercase : str , lowercase : Optional[int] , lowercase : Tuple , lowercase : Union[str, Any] ): '''simple docstring''' UpperCAmelCase = self.num_labels UpperCAmelCase = NystromformerForSequenceClassification(lowercase ) model.to(lowercase ) model.eval() UpperCAmelCase = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def A ( self : Any , lowercase : str , lowercase : List[Any] , lowercase : str , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Tuple , lowercase : Optional[Any] ): '''simple docstring''' UpperCAmelCase = self.num_labels UpperCAmelCase = NystromformerForTokenClassification(config=lowercase ) model.to(lowercase ) model.eval() UpperCAmelCase = model(lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def A ( self : Optional[int] , lowercase : int , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : Any , lowercase : Optional[Any] , lowercase : List[str] , lowercase : Dict ): '''simple docstring''' UpperCAmelCase = self.num_choices UpperCAmelCase = NystromformerForMultipleChoice(config=lowercase ) model.to(lowercase ) model.eval() UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase = model( lowercase , attention_mask=lowercase , token_type_ids=lowercase , labels=lowercase , ) 
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def A ( self : Dict ): '''simple docstring''' UpperCAmelCase = self.prepare_config_and_inputs() ( ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ( UpperCAmelCase ) , ) = config_and_inputs UpperCAmelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask} return config, inputs_dict @require_torch class _a ( __a , __a , unittest.TestCase ): __a : Optional[int] = ( ( NystromformerModel, NystromformerForMaskedLM, NystromformerForMultipleChoice, NystromformerForQuestionAnswering, NystromformerForSequenceClassification, NystromformerForTokenClassification, ) if is_torch_available() else () ) __a : List[Any] = ( { """feature-extraction""": NystromformerModel, """fill-mask""": NystromformerForMaskedLM, """question-answering""": NystromformerForQuestionAnswering, """text-classification""": NystromformerForSequenceClassification, """token-classification""": NystromformerForTokenClassification, """zero-shot""": NystromformerForSequenceClassification, } if is_torch_available() else {} ) __a : Optional[int] = False __a : int = False def A ( self : int ): '''simple docstring''' UpperCAmelCase = NystromformerModelTester(self ) UpperCAmelCase = ConfigTester(self , config_class=lowercase , hidden_size=37 ) def A ( self : Tuple ): '''simple docstring''' self.config_tester.run_common_tests() def A ( self : List[Any] ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowercase ) def A ( self : str ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase = type self.model_tester.create_and_check_model(*lowercase ) def A ( self : Dict ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*lowercase ) def A ( self : Dict ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*lowercase ) def A ( self : int ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*lowercase ) def A ( self : Tuple ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*lowercase ) def A ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*lowercase ) @slow def A ( self : List[Any] ): '''simple docstring''' for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase = NystromformerModel.from_pretrained(lowercase ) self.assertIsNotNone(lowercase ) @require_torch class _a ( unittest.TestCase ): @slow def A ( self : Dict ): '''simple docstring''' UpperCAmelCase = NystromformerModel.from_pretrained('''uw-madison/nystromformer-512''' ) UpperCAmelCase = torch.tensor([[0, 1, 2, 3, 4, 5]] ) with torch.no_grad(): UpperCAmelCase = model(lowercase )[0] UpperCAmelCase = torch.Size((1, 6, 768) ) self.assertEqual(output.shape , lowercase ) UpperCAmelCase = torch.tensor( [[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] 
) self.assertTrue(torch.allclose(output[:, :3, :3] , lowercase , atol=1E-4 ) ) @slow def A ( self : List[Any] ): '''simple docstring''' UpperCAmelCase = '''the [MASK] of Belgium is Brussels''' UpperCAmelCase = AutoTokenizer.from_pretrained('''uw-madison/nystromformer-512''' ) UpperCAmelCase = NystromformerForMaskedLM.from_pretrained('''uw-madison/nystromformer-512''' ) UpperCAmelCase = tokenizer(lowercase , return_tensors='''pt''' ) with torch.no_grad(): UpperCAmelCase = model(encoding.input_ids ).logits UpperCAmelCase = token_logits[:, 2, :].argmax(-1 )[0] self.assertEqual(tokenizer.decode(lowercase ) , '''capital''' )
34
'''simple docstring''' import unittest from transformers import EsmConfig, is_torch_available from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.esm.modeling_esmfold import EsmForProteinFolding class a__: def __init__( self : str , __snake_case : Union[str, Any] , __snake_case : List[str]=13 , __snake_case : Tuple=7 , __snake_case : Optional[Any]=False , __snake_case : Dict=True , __snake_case : List[Any]=False , __snake_case : Optional[int]=False , __snake_case : Optional[Any]=19 , __snake_case : Any=32 , __snake_case : Union[str, Any]=5 , __snake_case : Union[str, Any]=4 , __snake_case : int=37 , __snake_case : Union[str, Any]="gelu" , __snake_case : Optional[Any]=0.1 , __snake_case : List[str]=0.1 , __snake_case : int=5_12 , __snake_case : int=16 , __snake_case : Tuple=2 , __snake_case : str=0.02 , __snake_case : str=3 , __snake_case : Dict=4 , __snake_case : List[Any]=None , ): a : Tuple = parent a : List[str] = batch_size a : Optional[Any] = seq_length a : Tuple = is_training a : Optional[Any] = use_input_mask a : List[Any] = use_token_type_ids a : List[Any] = use_labels a : int = vocab_size a : Union[str, Any] = hidden_size a : Any = num_hidden_layers a : List[str] = num_attention_heads a : int = intermediate_size a : str = hidden_act a : Tuple = hidden_dropout_prob a : Union[str, Any] = attention_probs_dropout_prob a : List[str] = max_position_embeddings a : Any = type_vocab_size a : List[str] = type_sequence_label_size a : Union[str, Any] = initializer_range a : Optional[int] = num_labels a : Optional[Any] = num_choices a : Optional[int] = scope def lowercase_ ( self : List[Any] ): a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a : Dict = None if self.use_input_mask: a : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) a : Optional[Any] = None a : Optional[int] = None a : Dict = None if self.use_labels: a : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a : List[str] = ids_tensor([self.batch_size] , self.num_choices ) a : Dict = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase_ ( self : List[Any] ): a : Any = EsmConfig( vocab_size=33 , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , is_folding_model=__snake_case , esmfold_config={'trunk': {'num_blocks': 2}, 'fp16_esm': False} , ) return config def lowercase_ ( self : Optional[Any] , __snake_case : int , __snake_case : str , __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : str , __snake_case : Any ): a : Tuple = EsmForProteinFolding(config=__snake_case ).float() model.to(__snake_case ) model.eval() a : Dict = model(__snake_case , attention_mask=__snake_case ) a : Union[str, Any] = 
model(__snake_case ) a : List[Any] = model(__snake_case ) self.parent.assertEqual(result.positions.shape , (8, self.batch_size, self.seq_length, 14, 3) ) self.parent.assertEqual(result.angles.shape , (8, self.batch_size, self.seq_length, 7, 2) ) def lowercase_ ( self : Optional[Any] ): a : Tuple = self.prepare_config_and_inputs() ( ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ( a ) , ) : Optional[Any] = config_and_inputs a : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask} return config, inputs_dict @require_torch class a__( lowerCamelCase__ , lowerCamelCase__ , unittest.TestCase ): lowercase__ = False lowercase__ = (EsmForProteinFolding,) if is_torch_available() else () lowercase__ = () lowercase__ = {} if is_torch_available() else {} lowercase__ = False def lowercase_ ( self : int ): a : Tuple = EsmFoldModelTester(self ) a : Any = ConfigTester(self , config_class=__snake_case , hidden_size=37 ) def lowercase_ ( self : List[str] ): self.config_tester.run_common_tests() def lowercase_ ( self : Union[str, Any] ): a : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*__snake_case ) @unittest.skip('Does not support attention outputs' ) def lowercase_ ( self : str ): pass @unittest.skip def lowercase_ ( self : Optional[int] ): pass @unittest.skip('Esm does not support embedding resizing' ) def lowercase_ ( self : Optional[int] ): pass @unittest.skip('Esm does not support embedding resizing' ) def lowercase_ ( self : Any ): pass @unittest.skip('ESMFold does not support passing input embeds!' ) def lowercase_ ( self : Any ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : Union[str, Any] ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : List[Any] ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : List[Any] ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : int ): pass @unittest.skip('ESMFold does not support head pruning.' ) def lowercase_ ( self : List[Any] ): pass @unittest.skip('ESMFold does not output hidden states in the normal way.' ) def lowercase_ ( self : int ): pass @unittest.skip('ESMfold does not output hidden states in the normal way.' ) def lowercase_ ( self : int ): pass @unittest.skip('ESMFold only has one output format.' ) def lowercase_ ( self : Dict ): pass @unittest.skip('This test doesn\'t work for ESMFold and doesn\'t test core functionality' ) def lowercase_ ( self : Tuple ): pass @unittest.skip('ESMFold does not support input chunking.' ) def lowercase_ ( self : List[str] ): pass @unittest.skip('ESMFold doesn\'t respect you and it certainly doesn\'t respect your initialization arguments.' ) def lowercase_ ( self : List[Any] ): pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def lowercase_ ( self : Union[str, Any] ): pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def lowercase_ ( self : Any ): pass @unittest.skip('ESMFold doesn\'t support torchscript compilation.' ) def lowercase_ ( self : List[str] ): pass @unittest.skip('ESMFold doesn\'t support data parallel.' ) def lowercase_ ( self : Dict ): pass @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' 
) def lowercase_ ( self : Union[str, Any] ): pass @require_torch class a__( lowerCamelCase__ ): @slow def lowercase_ ( self : Optional[int] ): a : Optional[Any] = EsmForProteinFolding.from_pretrained('facebook/esmfold_v1' ).float() model.eval() a : int = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] ) a : Any = model(__snake_case )['positions'] a : Dict = torch.tensor([2.5828, 0.7993, -10.9334] , dtype=torch.floataa ) self.assertTrue(torch.allclose(position_outputs[0, 0, 0, 0] , __snake_case , atol=1e-4 ) )
297
0
import heapq import sys import numpy as np __lowerCAmelCase = tuple[int, int] class __a : def __init__( self ) -> Optional[int]: '''simple docstring''' lowercase__: str = [] lowercase__: List[Any] = set() def SCREAMING_SNAKE_CASE__ ( self ) -> Dict: '''simple docstring''' if not self.empty(): return self.elements[0][0] else: return float('inf' ) def SCREAMING_SNAKE_CASE__ ( self ) -> Any: '''simple docstring''' return len(self.elements ) == 0 def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> List[Any]: '''simple docstring''' if item not in self.set: heapq.heappush(self.elements , (priority, item) ) self.set.add(lowerCAmelCase__ ) else: # update # print("update", item) lowercase__: int = [] (lowercase__): Union[str, Any] = heapq.heappop(self.elements ) while x != item: temp.append((pri, x) ) (lowercase__): Union[str, Any] = heapq.heappop(self.elements ) temp.append((priority, item) ) for pro, xxx in temp: heapq.heappush(self.elements , (pro, xxx) ) def SCREAMING_SNAKE_CASE__ ( self , lowerCAmelCase__ ) -> Union[str, Any]: '''simple docstring''' if item in self.set: self.set.remove(lowerCAmelCase__ ) lowercase__: Optional[Any] = [] (lowercase__): Dict = heapq.heappop(self.elements ) while x != item: temp.append((pro, x) ) (lowercase__): Union[str, Any] = heapq.heappop(self.elements ) for prito, yyy in temp: heapq.heappush(self.elements , (prito, yyy) ) def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple: '''simple docstring''' return self.elements[0][1] def SCREAMING_SNAKE_CASE__ ( self ) -> Dict: '''simple docstring''' (lowercase__): str = heapq.heappop(self.elements ) self.set.remove(lowerCAmelCase__ ) return (priority, item) def snake_case_ ( snake_case , snake_case ) -> Optional[int]: # euclidean distance lowercase__: Optional[int] = np.array(snake_case ) lowercase__: List[str] = np.array(snake_case ) return np.linalg.norm(a - b ) def snake_case_ ( snake_case , snake_case ) -> Optional[int]: # integer division by time variable return consistent_heuristic(snake_case , snake_case ) // t def snake_case_ ( snake_case , snake_case ) -> str: # manhattan distance return abs(p[0] - goal[0] ) + abs(p[1] - goal[1] ) def snake_case_ ( snake_case , snake_case , snake_case , snake_case ) -> Dict: lowercase__: Dict = g_function[start] + Wa * heuristics[i](snake_case , snake_case ) return ans def snake_case_ ( snake_case , snake_case , snake_case ) -> Optional[Any]: lowercase__: Tuple = np.chararray((n, n) ) for i in range(snake_case ): for j in range(snake_case ): lowercase__: List[str] = '*' for i in range(snake_case ): for j in range(snake_case ): if (j, (n - 1) - i) in blocks: lowercase__: List[Any] = '#' lowercase__: Tuple = '-' lowercase__: int = back_pointer[goal] while x != start: (lowercase__): Any = x # print(x) lowercase__: Optional[Any] = '-' lowercase__: Any = back_pointer[x] lowercase__: str = '-' for i in range(snake_case ): for j in range(snake_case ): if (i, j) == (0, n - 1): print(grid[i][j] , end=' ' ) print('<-- End position' , end=' ' ) else: print(grid[i][j] , end=' ' ) print() print('^' ) print('Start position' ) print() print('# is an obstacle' ) print('- is the path taken by algorithm' ) print('PATH TAKEN BY THE ALGORITHM IS:-' ) lowercase__: int = back_pointer[goal] while x != start: print(snake_case , end=' ' ) lowercase__: List[Any] = back_pointer[x] print(snake_case ) sys.exit() def snake_case_ ( snake_case ) -> Tuple: if p[0] < 0 or p[0] > n - 1: return False if p[1] < 0 or p[1] > n - 1: return False return True def snake_case_ ( snake_case , snake_case 
, snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ) -> Optional[int]: for itera in range(snake_case ): open_list[itera].remove_element(snake_case ) # print("s", s) # print("j", j) (lowercase__): str = s lowercase__: Union[str, Any] = (x - 1, y) lowercase__: Union[str, Any] = (x + 1, y) lowercase__: Union[str, Any] = (x, y + 1) lowercase__: List[Any] = (x, y - 1) for neighbours in [left, right, up, down]: if neighbours not in blocks: if valid(snake_case ) and neighbours not in visited: # print("neighbour", neighbours) visited.add(snake_case ) lowercase__: List[Any] = -1 lowercase__: str = float('inf' ) if valid(snake_case ) and g_function[neighbours] > g_function[s] + 1: lowercase__: int = g_function[s] + 1 lowercase__: str = s if neighbours not in close_list_anchor: open_list[0].put(snake_case , key(snake_case , 0 , snake_case , snake_case ) ) if neighbours not in close_list_inad: for var in range(1 , snake_case ): if key(snake_case , snake_case , snake_case , snake_case ) <= Wa * key( snake_case , 0 , snake_case , snake_case ): open_list[j].put( snake_case , key(snake_case , snake_case , snake_case , snake_case ) ) def snake_case_ ( ) -> Optional[int]: lowercase__: List[Any] = [] for x in range(1 , 5 ): for y in range(1 , 6 ): some_list.append((x, y) ) for x in range(15 , 20 ): some_list.append((x, 17) ) for x in range(10 , 19 ): for y in range(1 , 15 ): some_list.append((x, y) ) # L block for x in range(1 , 4 ): for y in range(12 , 19 ): some_list.append((x, y) ) for x in range(3 , 13 ): for y in range(16 , 19 ): some_list.append((x, y) ) return some_list __lowerCAmelCase = {0: consistent_heuristic, 1: heuristic_a, 2: heuristic_a} __lowerCAmelCase = [ (0, 1), (1, 1), (2, 1), (3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1), (11, 1), (12, 1), (13, 1), (14, 1), (15, 1), (16, 1), (17, 1), (18, 1), (19, 1), ] __lowerCAmelCase = make_common_ground() __lowerCAmelCase = blocks_blk # hyper parameters __lowerCAmelCase = 1 __lowerCAmelCase = 1 __lowerCAmelCase = 20 __lowerCAmelCase = 3 # one consistent and two other inconsistent # start and end destination __lowerCAmelCase = (0, 0) __lowerCAmelCase = (n - 1, n - 1) __lowerCAmelCase = 1 def snake_case_ ( snake_case , snake_case , snake_case ) -> Optional[int]: lowercase__: Optional[Any] = {start: 0, goal: float('inf' )} lowercase__: List[str] = {start: -1, goal: -1} lowercase__: Optional[int] = [] lowercase__: Tuple = set() for i in range(snake_case ): open_list.append(PriorityQueue() ) open_list[i].put(snake_case , key(snake_case , snake_case , snake_case , snake_case ) ) lowercase__: list[int] = [] lowercase__: list[int] = [] while open_list[0].minkey() < float('inf' ): for i in range(1 , snake_case ): # print(open_list[0].minkey(), open_list[i].minkey()) if open_list[i].minkey() <= Wa * open_list[0].minkey(): global t t += 1 if g_function[goal] <= open_list[i].minkey(): if g_function[goal] < float('inf' ): do_something(snake_case , snake_case , snake_case ) else: lowercase__: Dict = open_list[i].top_show() visited.add(snake_case ) expand_state( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , ) close_list_inad.append(snake_case ) else: if g_function[goal] <= open_list[0].minkey(): if g_function[goal] < float('inf' ): do_something(snake_case , snake_case , snake_case ) else: lowercase__: str = open_list[0].top_show() visited.add(snake_case ) expand_state( snake_case , 0 , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , 
) close_list_anchor.append(snake_case ) print('No path found to goal' ) print() for i in range(n - 1 , -1 , -1 ): for j in range(snake_case ): if (j, i) in blocks: print('#' , end=' ' ) elif (j, i) in back_pointer: if (j, i) == (n - 1, n - 1): print('*' , end=' ' ) else: print('-' , end=' ' ) else: print('*' , end=' ' ) if (j, i) == (n - 1, n - 1): print('<-- End position' , end=' ' ) print() print('^' ) print('Start position' ) print() print('# is an obstacle' ) print('- is the path taken by algorithm' ) if __name__ == "__main__": multi_a_star(start, goal, n_heuristic)
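# Quick numeric check of the two distance heuristics defined above on a toy
# pair of grid positions (the coordinates are illustrative).
import numpy as np

p, goal = (0, 0), (3, 4)
print(np.linalg.norm(np.array(p) - np.array(goal)))  # euclidean distance: 5.0
print(abs(p[0] - goal[0]) + abs(p[1] - goal[1]))     # manhattan distance: 7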
356
import argparse import csv import logging import os import random import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from tqdm import tqdm, trange from transformers import ( CONFIG_NAME, WEIGHTS_NAME, AdamW, OpenAIGPTDoubleHeadsModel, OpenAIGPTTokenizer, get_linear_schedule_with_warmup, ) logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO ) __lowerCAmelCase = logging.getLogger(__name__) def snake_case_ ( snake_case , snake_case ) -> Optional[int]: lowercase__: Optional[int] = np.argmax(snake_case , axis=1 ) return np.sum(outputs == labels ) def snake_case_ ( snake_case ) -> Dict: with open(snake_case , encoding='utf_8' ) as f: lowercase__: str = csv.reader(snake_case ) lowercase__: int = [] next(snake_case ) # skip the first line for line in tqdm(snake_case ): output.append((' '.join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) ) return output def snake_case_ ( snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Tuple: lowercase__: List[Any] = [] for dataset in encoded_datasets: lowercase__: Dict = len(snake_case ) lowercase__: int = np.zeros((n_batch, 2, input_len) , dtype=np.intaa ) lowercase__: int = np.zeros((n_batch, 2) , dtype=np.intaa ) lowercase__: Optional[int] = np.full((n_batch, 2, input_len) , fill_value=-1_00 , dtype=np.intaa ) lowercase__: Optional[Any] = np.zeros((n_batch,) , dtype=np.intaa ) for ( i, (story, conta, conta, mc_label), ) in enumerate(snake_case ): lowercase__: List[Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] lowercase__: List[Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token] lowercase__: Union[str, Any] = with_conta lowercase__: List[Any] = with_conta lowercase__: Any = len(snake_case ) - 1 lowercase__: Dict = len(snake_case ) - 1 lowercase__: Optional[Any] = with_conta lowercase__: Tuple = with_conta lowercase__: int = mc_label lowercase__: Any = (input_ids, mc_token_ids, lm_labels, mc_labels) tensor_datasets.append(tuple(torch.tensor(snake_case ) for t in all_inputs ) ) return tensor_datasets def snake_case_ ( ) -> Union[str, Any]: lowercase__: Optional[Any] = argparse.ArgumentParser() parser.add_argument('--model_name' , type=snake_case , default='openai-gpt' , help='pretrained model name' ) parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' ) parser.add_argument('--do_eval' , action='store_true' , help='Whether to run eval on the dev set.' ) parser.add_argument( '--output_dir' , default=snake_case , type=snake_case , required=snake_case , help='The output directory where the model predictions and checkpoints will be written.' , ) parser.add_argument('--train_dataset' , type=snake_case , default='' ) parser.add_argument('--eval_dataset' , type=snake_case , default='' ) parser.add_argument('--seed' , type=snake_case , default=42 ) parser.add_argument('--num_train_epochs' , type=snake_case , default=3 ) parser.add_argument('--train_batch_size' , type=snake_case , default=8 ) parser.add_argument('--eval_batch_size' , type=snake_case , default=16 ) parser.add_argument('--adam_epsilon' , default=1e-8 , type=snake_case , help='Epsilon for Adam optimizer.' 
) parser.add_argument('--max_grad_norm' , type=snake_case , default=1 ) parser.add_argument( '--max_steps' , default=-1 , type=snake_case , help=( 'If > 0: set total number of training steps to perform. Override num_train_epochs.' ) , ) parser.add_argument( '--gradient_accumulation_steps' , type=snake_case , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , ) parser.add_argument('--learning_rate' , type=snake_case , default=6.25e-5 ) parser.add_argument('--warmup_steps' , default=0 , type=snake_case , help='Linear warmup over warmup_steps.' ) parser.add_argument('--lr_schedule' , type=snake_case , default='warmup_linear' ) parser.add_argument('--weight_decay' , type=snake_case , default=0.0_1 ) parser.add_argument('--lm_coef' , type=snake_case , default=0.9 ) parser.add_argument('--n_valid' , type=snake_case , default=3_74 ) parser.add_argument('--server_ip' , type=snake_case , default='' , help='Can be used for distant debugging.' ) parser.add_argument('--server_port' , type=snake_case , default='' , help='Can be used for distant debugging.' ) lowercase__: List[str] = parser.parse_args() print(snake_case ) if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print('Waiting for debugger attach' ) ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=snake_case ) ptvsd.wait_for_attach() random.seed(args.seed ) np.random.seed(args.seed ) torch.manual_seed(args.seed ) torch.cuda.manual_seed_all(args.seed ) lowercase__: Any = torch.device('cuda' if torch.cuda.is_available() else 'cpu' ) lowercase__: Tuple = torch.cuda.device_count() logger.info('device: {}, n_gpu {}'.format(snake_case , snake_case ) ) if not args.do_train and not args.do_eval: raise ValueError('At least one of `do_train` or `do_eval` must be True.' ) if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) # Load tokenizer and model # This loading functions also add new tokens and embeddings called `special tokens` # These new embeddings will be fine-tuned on the RocStories dataset lowercase__: Any = ['_start_', '_delimiter_', '_classify_'] lowercase__: Any = OpenAIGPTTokenizer.from_pretrained(args.model_name ) tokenizer.add_tokens(snake_case ) lowercase__: int = tokenizer.convert_tokens_to_ids(snake_case ) lowercase__: int = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name ) model.resize_token_embeddings(len(snake_case ) ) model.to(snake_case ) # Load and encode the datasets def tokenize_and_encode(snake_case ): if isinstance(snake_case , snake_case ): return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(snake_case ) ) elif isinstance(snake_case , snake_case ): return obj return [tokenize_and_encode(snake_case ) for o in obj] logger.info('Encoding dataset...' 
) lowercase__: Dict = load_rocstories_dataset(args.train_dataset ) lowercase__: Dict = load_rocstories_dataset(args.eval_dataset ) lowercase__: str = (train_dataset, eval_dataset) lowercase__: Any = tokenize_and_encode(snake_case ) # Compute the max input length for the Transformer lowercase__: Optional[Any] = model.config.n_positions // 2 - 2 lowercase__: Optional[int] = max( len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3 for dataset in encoded_datasets for story, conta, conta, _ in dataset ) lowercase__: List[str] = min(snake_case , model.config.n_positions ) # Max size of input for the pre-trained model # Prepare inputs tensors and dataloaders lowercase__: str = pre_process_datasets(snake_case , snake_case , snake_case , *snake_case ) lowercase__ , lowercase__: Optional[Any] = tensor_datasets[0], tensor_datasets[1] lowercase__: List[str] = TensorDataset(*snake_case ) lowercase__: Dict = RandomSampler(snake_case ) lowercase__: Optional[int] = DataLoader(snake_case , sampler=snake_case , batch_size=args.train_batch_size ) lowercase__: str = TensorDataset(*snake_case ) lowercase__: str = SequentialSampler(snake_case ) lowercase__: Optional[Any] = DataLoader(snake_case , sampler=snake_case , batch_size=args.eval_batch_size ) # Prepare optimizer if args.do_train: if args.max_steps > 0: lowercase__: Union[str, Any] = args.max_steps lowercase__: Tuple = args.max_steps // (len(snake_case ) // args.gradient_accumulation_steps) + 1 else: lowercase__: Optional[Any] = len(snake_case ) // args.gradient_accumulation_steps * args.num_train_epochs lowercase__: str = list(model.named_parameters() ) lowercase__: Any = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] lowercase__: str = [ { 'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )], 'weight_decay': args.weight_decay, }, {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0}, ] lowercase__: Tuple = AdamW(snake_case , lr=args.learning_rate , eps=args.adam_epsilon ) lowercase__: Tuple = get_linear_schedule_with_warmup( snake_case , num_warmup_steps=args.warmup_steps , num_training_steps=snake_case ) if args.do_train: lowercase__ , lowercase__ , lowercase__: int = 0, 0, None model.train() for _ in trange(int(args.num_train_epochs ) , desc='Epoch' ): lowercase__: str = 0 lowercase__: Optional[Any] = 0 lowercase__: List[Any] = tqdm(snake_case , desc='Training' ) for step, batch in enumerate(snake_case ): lowercase__: Union[str, Any] = tuple(t.to(snake_case ) for t in batch ) lowercase__ , lowercase__ , lowercase__ , lowercase__: List[Any] = batch lowercase__: List[str] = model(snake_case , mc_token_ids=snake_case , lm_labels=snake_case , mc_labels=snake_case ) lowercase__: Optional[Any] = args.lm_coef * losses[0] + losses[1] loss.backward() optimizer.step() scheduler.step() optimizer.zero_grad() tr_loss += loss.item() lowercase__: Union[str, Any] = ( loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item() ) nb_tr_steps += 1 lowercase__: Tuple = 'Training loss: {:.2e} lr: {:.2e}'.format(snake_case , scheduler.get_lr()[0] ) # Save a trained model if args.do_train: # Save a trained model, configuration and tokenizer lowercase__: Any = model.module if hasattr(snake_case , 'module' ) else model # Only save the model itself # If we save using the predefined names, we can load using `from_pretrained` lowercase__: Tuple = os.path.join(args.output_dir , snake_case ) lowercase__: List[str] = 
os.path.join(args.output_dir , snake_case ) torch.save(model_to_save.state_dict() , snake_case ) model_to_save.config.to_json_file(snake_case ) tokenizer.save_vocabulary(args.output_dir ) # Load a trained model and vocabulary that you have fine-tuned lowercase__: Optional[Any] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir ) lowercase__: Any = OpenAIGPTTokenizer.from_pretrained(args.output_dir ) model.to(snake_case ) if args.do_eval: model.eval() lowercase__ , lowercase__: Optional[Any] = 0, 0 lowercase__ , lowercase__: List[Any] = 0, 0 for batch in tqdm(snake_case , desc='Evaluating' ): lowercase__: str = tuple(t.to(snake_case ) for t in batch ) lowercase__ , lowercase__ , lowercase__ , lowercase__: Union[str, Any] = batch with torch.no_grad(): lowercase__ , lowercase__ , lowercase__ , lowercase__: Any = model( snake_case , mc_token_ids=snake_case , lm_labels=snake_case , mc_labels=snake_case ) lowercase__: Dict = mc_logits.detach().cpu().numpy() lowercase__: Tuple = mc_labels.to('cpu' ).numpy() lowercase__: Dict = accuracy(snake_case , snake_case ) eval_loss += mc_loss.mean().item() eval_accuracy += tmp_eval_accuracy nb_eval_examples += input_ids.size(0 ) nb_eval_steps += 1 lowercase__: Optional[int] = eval_loss / nb_eval_steps lowercase__: Optional[int] = eval_accuracy / nb_eval_examples lowercase__: int = tr_loss / nb_tr_steps if args.do_train else None lowercase__: Optional[Any] = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss} lowercase__: Dict = os.path.join(args.output_dir , 'eval_results.txt' ) with open(snake_case , 'w' ) as writer: logger.info('***** Eval results *****' ) for key in sorted(result.keys() ): logger.info(' %s = %s' , snake_case , str(result[key] ) ) writer.write('%s = %s\n' % (key, str(result[key] )) ) if __name__ == "__main__": main()
288
0
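The fine-tuning script in the record above reports its loss through an exponential moving average (seeded on the first step, then blended with the 0.7/0.3 weights visible in the training loop). A minimal standalone sketch of that smoothing, with hypothetical per-step losses:

from typing import Optional


def update_ema(ema: Optional[float], value: float) -> float:
    # First observation seeds the average; later ones are blended in
    # with the same 0.7 / 0.3 weights used in the training loop above.
    return value if ema is None else 0.7 * ema + 0.3 * value


ema = None
for step_loss in [2.0, 1.5, 1.2, 1.1]:  # hypothetical per-step losses
    ema = update_ema(ema, step_loss)
    print(f"smoothed loss: {ema:.4f}")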
"""simple docstring""" import os import posixpath import uuid from dataclasses import dataclass from typing import TYPE_CHECKING, Iterable, List, Optional, Tuple, Union import numpy as np import pyarrow as pa import datasets from datasets.arrow_writer import ArrowWriter, ParquetWriter from datasets.config import MAX_SHARD_SIZE from datasets.filesystems import ( is_remote_filesystem, rename, ) from datasets.iterable_dataset import _BaseExamplesIterable from datasets.utils.py_utils import convert_file_size_to_int lowerCAmelCase__ = datasets.utils.logging.get_logger(__name__) if TYPE_CHECKING: import pyspark @dataclass class __snake_case ( datasets.BuilderConfig): snake_case__ : Optional[datasets.Features] = None def snake_case_ ( A_ : "pyspark.sql.DataFrame", A_ : List[int], ): '''simple docstring''' import pyspark def generate_fn(): _lowerCamelCase : Any = df.select('''*''', pyspark.sql.functions.spark_partition_id().alias('''part_id''' ) ) for partition_id in partition_order: _lowerCamelCase : List[str] = df_with_partition_id.select('''*''' ).where(F'''part_id = {partition_id}''' ).drop('''part_id''' ) _lowerCamelCase : Optional[Any] = partition_df.collect() _lowerCamelCase : int = 0 for row in rows: yield F'''{partition_id}_{row_id}''', row.asDict() row_id += 1 return generate_fn class __snake_case ( _BaseExamplesIterable): def __init__( self : List[str] , __lowerCAmelCase : "pyspark.sql.DataFrame" , __lowerCAmelCase : List[str]=None , ): """simple docstring""" _lowerCamelCase : int = df _lowerCamelCase : Union[str, Any] = partition_order or range(self.df.rdd.getNumPartitions() ) _lowerCamelCase : str = _generate_iterable_examples(self.df , self.partition_order ) def __iter__( self : Optional[int] ): """simple docstring""" yield from self.generate_examples_fn() def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : np.random.Generator ): """simple docstring""" _lowerCamelCase : List[str] = list(range(self.df.rdd.getNumPartitions() ) ) generator.shuffle(__lowerCAmelCase ) return SparkExamplesIterable(self.df , partition_order=__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : int , __lowerCAmelCase : int ): """simple docstring""" _lowerCamelCase : List[Any] = self.split_shard_indices_by_worker(__lowerCAmelCase , __lowerCAmelCase ) return SparkExamplesIterable(self.df , partition_order=__lowerCAmelCase ) @property def SCREAMING_SNAKE_CASE ( self : Optional[int] ): """simple docstring""" return len(self.partition_order ) class __snake_case ( datasets.DatasetBuilder): snake_case__ : Dict = SparkConfig def __init__( self : str , __lowerCAmelCase : "pyspark.sql.DataFrame" , __lowerCAmelCase : str = None , __lowerCAmelCase : str = None , **__lowerCAmelCase : str , ): """simple docstring""" import pyspark _lowerCamelCase : Union[str, Any] = pyspark.sql.SparkSession.builder.getOrCreate() _lowerCamelCase : Tuple = df _lowerCamelCase : Tuple = working_dir super().__init__( cache_dir=__lowerCAmelCase , config_name=str(self.df.semanticHash() ) , **__lowerCAmelCase , ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" def create_cache_and_write_probe(__lowerCAmelCase : Optional[int] ): # makedirs with exist_ok will recursively create the directory. It will not throw an error if directories # already exist. 
os.makedirs(self._cache_dir , exist_ok=__lowerCAmelCase ) _lowerCamelCase : str = os.path.join(self._cache_dir , '''fs_test''' + uuid.uuida().hex ) # Opening the file in append mode will create a new file unless it already exists, in which case it will not # change the file contents. open(__lowerCAmelCase , '''a''' ) return [probe_file] if self._spark.conf.get('''spark.master''' , '''''' ).startswith('''local''' ): return # If the cluster is multi-node, make sure that the user provided a cache_dir and that it is on an NFS # accessible to the driver. # TODO: Stream batches to the driver using ArrowCollectSerializer instead of throwing an error. if self._cache_dir: _lowerCamelCase : Union[str, Any] = ( self._spark.sparkContext.parallelize(range(1 ) , 1 ).mapPartitions(__lowerCAmelCase ).collect() ) if os.path.isfile(probe[0] ): return raise ValueError( '''When using Dataset.from_spark on a multi-node cluster, the driver and all workers should be able to access cache_dir''' ) def SCREAMING_SNAKE_CASE ( self : str ): """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : datasets.download.download_manager.DownloadManager ): """simple docstring""" return [datasets.SplitGenerator(name=datasets.Split.TRAIN )] def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : str ): """simple docstring""" import pyspark def get_arrow_batch_size(__lowerCAmelCase : Union[str, Any] ): for batch in it: yield pa.RecordBatch.from_pydict({'''batch_bytes''': [batch.nbytes]} ) _lowerCamelCase : Optional[int] = self.df.count() _lowerCamelCase : Optional[Any] = df_num_rows if df_num_rows <= 1_0_0 else 1_0_0 # Approximate the size of each row (in Arrow format) by averaging over a max-100-row sample. _lowerCamelCase : Union[str, Any] = ( self.df.limit(__lowerCAmelCase ) .repartition(1 ) .mapInArrow(__lowerCAmelCase , '''batch_bytes: long''' ) .agg(pyspark.sql.functions.sum('''batch_bytes''' ).alias('''sample_bytes''' ) ) .collect()[0] .sample_bytes / sample_num_rows ) _lowerCamelCase : Any = approx_bytes_per_row * df_num_rows if approx_total_size > max_shard_size: # Make sure there is at least one row per partition. _lowerCamelCase : Union[str, Any] = min(__lowerCAmelCase , int(approx_total_size / max_shard_size ) ) _lowerCamelCase : List[str] = self.df.repartition(__lowerCAmelCase ) def SCREAMING_SNAKE_CASE ( self : List[str] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : int , ): """simple docstring""" import pyspark _lowerCamelCase : Dict = ParquetWriter if file_format == '''parquet''' else ArrowWriter _lowerCamelCase : Optional[Any] = os.path.join(self._working_dir , os.path.basename(__lowerCAmelCase ) ) if self._working_dir else fpath _lowerCamelCase : Tuple = file_format == '''parquet''' # Define these so that we don't reference self in write_arrow, which will result in a pickling error due to # pickling the SparkContext. _lowerCamelCase : int = self.config.features _lowerCamelCase : Union[str, Any] = self._writer_batch_size _lowerCamelCase : Optional[int] = self._fs.storage_options def write_arrow(__lowerCAmelCase : Optional[int] ): # Within the same SparkContext, no two task attempts will share the same attempt ID. _lowerCamelCase : int = pyspark.TaskContext().taskAttemptId() _lowerCamelCase : Optional[int] = next(__lowerCAmelCase , __lowerCAmelCase ) if first_batch is None: # Some partitions might not receive any data. 
return pa.RecordBatch.from_arrays( [[task_id], [0], [0]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) _lowerCamelCase : Tuple = 0 _lowerCamelCase : Optional[Any] = writer_class( features=__lowerCAmelCase , path=working_fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , writer_batch_size=__lowerCAmelCase , storage_options=__lowerCAmelCase , embed_local_files=__lowerCAmelCase , ) _lowerCamelCase : Dict = pa.Table.from_batches([first_batch] ) writer.write_table(__lowerCAmelCase ) for batch in it: if max_shard_size is not None and writer._num_bytes >= max_shard_size: _lowerCamelCase , _lowerCamelCase : Union[str, Any] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) shard_id += 1 _lowerCamelCase : Dict = writer_class( features=writer._features , path=working_fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , writer_batch_size=__lowerCAmelCase , storage_options=__lowerCAmelCase , embed_local_files=__lowerCAmelCase , ) _lowerCamelCase : int = pa.Table.from_batches([batch] ) writer.write_table(__lowerCAmelCase ) if writer._num_bytes > 0: _lowerCamelCase , _lowerCamelCase : List[str] = writer.finalize() writer.close() yield pa.RecordBatch.from_arrays( [[task_id], [num_examples], [num_bytes]] , names=['''task_id''', '''num_examples''', '''num_bytes'''] , ) if working_fpath != fpath: for file in os.listdir(os.path.dirname(__lowerCAmelCase ) ): _lowerCamelCase : List[Any] = os.path.join(os.path.dirname(__lowerCAmelCase ) , os.path.basename(__lowerCAmelCase ) ) shutil.move(__lowerCAmelCase , __lowerCAmelCase ) _lowerCamelCase : Any = ( self.df.mapInArrow(__lowerCAmelCase , '''task_id: long, num_examples: long, num_bytes: long''' ) .groupBy('''task_id''' ) .agg( pyspark.sql.functions.sum('''num_examples''' ).alias('''total_num_examples''' ) , pyspark.sql.functions.sum('''num_bytes''' ).alias('''total_num_bytes''' ) , pyspark.sql.functions.count('''num_bytes''' ).alias('''num_shards''' ) , pyspark.sql.functions.collect_list('''num_examples''' ).alias('''shard_lengths''' ) , ) .collect() ) for row in stats: yield row.task_id, (row.total_num_examples, row.total_num_bytes, row.num_shards, row.shard_lengths) def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : "datasets.SplitGenerator" , __lowerCAmelCase : str = "arrow" , __lowerCAmelCase : Optional[Union[str, int]] = None , __lowerCAmelCase : Optional[int] = None , **__lowerCAmelCase : List[Any] , ): """simple docstring""" self._validate_cache_dir() _lowerCamelCase : Union[str, Any] = convert_file_size_to_int(max_shard_size or MAX_SHARD_SIZE ) self._repartition_df_if_needed(__lowerCAmelCase ) _lowerCamelCase : str = not is_remote_filesystem(self._fs ) _lowerCamelCase : List[Any] = os.path.join if is_local else posixpath.join _lowerCamelCase : Optional[int] = '''-TTTTT-SSSSS-of-NNNNN''' _lowerCamelCase : List[Any] = f'''{self.name}-{split_generator.name}{SUFFIX}.{file_format}''' _lowerCamelCase : List[Any] = path_join(self._output_dir , __lowerCAmelCase ) _lowerCamelCase : Union[str, Any] = 0 _lowerCamelCase : List[str] = 0 _lowerCamelCase : Any = 0 _lowerCamelCase : List[str] = [] _lowerCamelCase : Union[str, Any] = [] for task_id, content in self._prepare_split_single(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ): ( ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) , ( _lowerCamelCase ) 
, ) : Optional[int] = content if num_bytes > 0: total_num_examples += num_examples total_num_bytes += num_bytes total_shards += num_shards task_id_and_num_shards.append((task_id, num_shards) ) all_shard_lengths.extend(__lowerCAmelCase ) _lowerCamelCase : Tuple = total_num_examples _lowerCamelCase : Dict = total_num_bytes # should rename everything at the end logger.debug(f'''Renaming {total_shards} shards.''' ) if total_shards > 1: _lowerCamelCase : Any = all_shard_lengths # Define fs outside of _rename_shard so that we don't reference self in the function, which will result in a # pickling error due to pickling the SparkContext. _lowerCamelCase : Optional[Any] = self._fs # use the -SSSSS-of-NNNNN pattern def _rename_shard( __lowerCAmelCase : int , __lowerCAmelCase : int , __lowerCAmelCase : int , ): rename( __lowerCAmelCase , fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace('''TTTTT-SSSSS''' , f'''{global_shard_id:05d}''' ).replace('''NNNNN''' , f'''{total_shards:05d}''' ) , ) _lowerCamelCase : Tuple = [] _lowerCamelCase : Optional[Any] = 0 for i in range(len(__lowerCAmelCase ) ): _lowerCamelCase , _lowerCamelCase : Union[str, Any] = task_id_and_num_shards[i] for shard_id in range(__lowerCAmelCase ): args.append([task_id, shard_id, global_shard_id] ) global_shard_id += 1 self._spark.sparkContext.parallelize(__lowerCAmelCase , len(__lowerCAmelCase ) ).map(lambda __lowerCAmelCase : _rename_shard(*__lowerCAmelCase ) ).collect() else: # don't use any pattern _lowerCamelCase : List[Any] = 0 _lowerCamelCase : List[str] = task_id_and_num_shards[0][0] self._rename( fpath.replace('''SSSSS''' , f'''{shard_id:05d}''' ).replace('''TTTTT''' , f'''{task_id:05d}''' ) , fpath.replace(__lowerCAmelCase , '''''' ) , ) def SCREAMING_SNAKE_CASE ( self : List[Any] , __lowerCAmelCase : "datasets.SplitGenerator" , ): """simple docstring""" return SparkExamplesIterable(self.df )
72
"""simple docstring""" from __future__ import annotations def snake_case_ ( A_ : str ): '''simple docstring''' return [ord(A_ ) - 96 for elem in plain] def snake_case_ ( A_ : list[int] ): '''simple docstring''' return "".join(chr(elem + 96 ) for elem in encoded ) def snake_case_ ( ): '''simple docstring''' _lowerCamelCase : Dict = encode(input('''-> ''' ).strip().lower() ) print('''Encoded: ''', A_ ) print('''Decoded:''', decode(A_ ) ) if __name__ == "__main__": main()
72
1
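The cipher in the record above maps letters to their alphabet positions ('a' -> 1 ... 'z' -> 26) and back. A quick self-contained round-trip check of that mapping:

# Round-trip through the a1z26-style mapping used by encode/decode above.
plain = "hello"
encoded = [ord(ch) - 96 for ch in plain]        # [8, 5, 12, 12, 15]
decoded = "".join(chr(n + 96) for n in encoded)
assert decoded == plain
print(encoded, decoded)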
'''simple docstring'''

from __future__ import annotations

from collections.abc import Generator


def sieve() -> Generator[int, None, None]:
    """simple docstring"""
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """simple docstring"""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
136
'''simple docstring'''

import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """simple docstring"""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
136
1
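The second snippet in the record above reduces the expected number of distinct colours to a single binomial ratio. A worked instance of that formula with the module's constants (70 balls, 7 colours, 20 picked):

import math

# E[distinct colours] = NUM_COLOURS * (1 - C(60, 20) / C(70, 20)):
# by linearity of expectation, each colour is present unless all 20 picks
# avoid its 10 balls.
total = math.comb(70, 20)                # all ways to draw 20 of 70 balls
missing_one_colour = math.comb(60, 20)   # draws avoiding one fixed colour
expected = 7 * (1 - missing_one_colour / total)
print(f"{expected:.9f}")                 # ~6.818741802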
"""simple docstring""" import itertools from dataclasses import dataclass from typing import Optional import pandas as pd import pyarrow as pa import datasets from datasets.table import table_cast @dataclass class SCREAMING_SNAKE_CASE__ ( datasets.BuilderConfig ): """simple docstring""" a : Optional[datasets.Features] =None class SCREAMING_SNAKE_CASE__ ( datasets.ArrowBasedBuilder ): """simple docstring""" a : Tuple =PandasConfig def lowercase__ ( self ): """simple docstring""" return datasets.DatasetInfo(features=self.config.features ) def lowercase__ ( self , snake_case__ ): """simple docstring""" if not self.config.data_files: raise ValueError(f"""At least one data file must be specified, but got data_files={self.config.data_files}""" ) lowerCAmelCase : Any = dl_manager.download_and_extract(self.config.data_files ) if isinstance(snake_case__ , (str, list, tuple) ): lowerCAmelCase : List[Any] = data_files if isinstance(snake_case__ , snake_case__ ): lowerCAmelCase : Dict = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive lowerCAmelCase : List[str] = [dl_manager.iter_files(snake_case__ ) for file in files] return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )] lowerCAmelCase : Optional[Any] = [] for split_name, files in data_files.items(): if isinstance(snake_case__ , snake_case__ ): lowerCAmelCase : Optional[int] = [files] # Use `dl_manager.iter_files` to skip hidden files in an extracted archive lowerCAmelCase : List[Any] = [dl_manager.iter_files(snake_case__ ) for file in files] splits.append(datasets.SplitGenerator(name=snake_case__ , gen_kwargs={"files": files} ) ) return splits def lowercase__ ( self , snake_case__ ): """simple docstring""" if self.config.features is not None: # more expensive cast to support nested features with keys in a different order # allows str <-> int/float or str to Audio for example lowerCAmelCase : Union[str, Any] = table_cast(snake_case__ , self.config.features.arrow_schema ) return pa_table def lowercase__ ( self , snake_case__ ): """simple docstring""" for i, file in enumerate(itertools.chain.from_iterable(snake_case__ ) ): with open(snake_case__ , "rb" ) as f: lowerCAmelCase : int = pa.Table.from_pandas(pd.read_pickle(snake_case__ ) ) yield i, self._cast_table(snake_case__ )
108
import math


def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12_345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"""{solution() = }""")
299
0
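The partition checker in the record above accepts n exactly when sqrt(4n + 1)/2 + 1/2 is a power of two, which works out to n = 2^e * (2^e - 1). A small verification of both directions:

import math


def check_partition_perfect(n: int) -> bool:
    # Same predicate as in the record above.
    exponent = math.log2(math.sqrt(4 * n + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


for n in (2, 12, 56, 240):  # 2^e * (2^e - 1) for e = 1..4
    assert check_partition_perfect(n)
assert not check_partition_perfect(6)  # sqrt(25)/2 + 1/2 = 3 is not a power of two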
'''simple docstring''' __UpperCAmelCase =range(2, 2_0 + 1) __UpperCAmelCase =[1_0**k for k in range(ks[-1] + 1)] __UpperCAmelCase ={} def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]: __lowerCamelCase = sum(a_i[j] for j in range(_snake_case , len(_snake_case ) ) ) __lowerCamelCase = sum(a_i[j] * base[j] for j in range(min(len(_snake_case ) , _snake_case ) ) ) __lowerCamelCase = 0, 0 __lowerCamelCase = n - i __lowerCamelCase = memo.get(_snake_case ) if sub_memo is not None: __lowerCamelCase = sub_memo.get(_snake_case ) if jumps is not None and len(_snake_case ) > 0: # find and make the largest jump without going over __lowerCamelCase = -1 for _k in range(len(_snake_case ) - 1 , -1 , -1 ): if jumps[_k][2] <= k and jumps[_k][1] <= max_dn: __lowerCamelCase = _k break if max_jump >= 0: __lowerCamelCase = jumps[max_jump] # since the difference between jumps is cached, add c __lowerCamelCase = diff + c for j in range(min(_snake_case , len(_snake_case ) ) ): __lowerCamelCase = divmod(_snake_case , 10 ) if new_c > 0: add(_snake_case , _snake_case , _snake_case ) else: __lowerCamelCase = [] else: __lowerCamelCase = {c: []} __lowerCamelCase = sub_memo if dn >= max_dn or c + diff >= base[k]: return diff, dn if k > ks[0]: while True: # keep doing smaller jumps __lowerCamelCase = next_term(_snake_case , k - 1 , i + dn , _snake_case ) diff += _diff dn += terms_jumped if dn >= max_dn or c + diff >= base[k]: break else: # would be too small a jump, just compute sequential terms instead __lowerCamelCase = compute(_snake_case , _snake_case , i + dn , _snake_case ) diff += _diff dn += terms_jumped __lowerCamelCase = sub_memo[c] # keep jumps sorted by # of terms skipped __lowerCamelCase = 0 while j < len(_snake_case ): if jumps[j][1] > dn: break j += 1 # cache the jump for this value digitsum(b) and c sub_memo[c].insert(_snake_case , (diff, dn, k) ) return (diff, dn) def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]: if i >= n: return 0, i if k > len(_snake_case ): a_i.extend([0 for _ in range(k - len(_snake_case ) )] ) # note: a_i -> b * 10^k + c # ds_b -> digitsum(b) # ds_c -> digitsum(c) __lowerCamelCase = i __lowerCamelCase = 0, 0, 0 for j in range(len(_snake_case ) ): if j >= k: ds_b += a_i[j] else: ds_c += a_i[j] while i < n: i += 1 __lowerCamelCase = ds_c + ds_b diff += addend __lowerCamelCase = 0 for j in range(_snake_case ): __lowerCamelCase = a_i[j] + addend __lowerCamelCase = divmod(_snake_case , 10 ) ds_c += a_i[j] if addend > 0: break if addend > 0: add(_snake_case , _snake_case , _snake_case ) return diff, i - start_i def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple: for j in range(_snake_case , len(_snake_case ) ): __lowerCamelCase = digits[j] + addend if s >= 10: __lowerCamelCase = divmod(_snake_case , 10 ) __lowerCamelCase = addend // 10 + quotient else: __lowerCamelCase = s __lowerCamelCase = addend // 10 if addend == 0: break while addend > 0: __lowerCamelCase = divmod(_snake_case , 10 ) digits.append(_snake_case ) def __lowerCAmelCase ( UpperCamelCase__ = 10**15 ) -> int: __lowerCamelCase = [1] __lowerCamelCase = 1 __lowerCamelCase = 0 while True: __lowerCamelCase = next_term(_snake_case , 20 , i + dn , _snake_case ) dn += terms_jumped if dn == n - i: break __lowerCamelCase = 0 for j in range(len(_snake_case ) ): a_n += digits[j] * 10**j return a_n if __name__ == "__main__": print(F'{solution() = }')
369
'''simple docstring''' import collections import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging __UpperCAmelCase =logging.get_logger(__name__) __UpperCAmelCase ="▁" __UpperCAmelCase ={"vocab_file": "prophetnet.tokenizer"} __UpperCAmelCase ={ "vocab_file": { "microsoft/xprophetnet-large-wiki100-cased": ( "https://huggingface.co/microsoft/xprophetnet-large-wiki100-cased/resolve/main/prophetnet.tokenizer" ), } } __UpperCAmelCase ={ "microsoft/xprophetnet-large-wiki100-cased": {"do_lower_case": False}, } __UpperCAmelCase ={ "microsoft/xprophetnet-large-wiki100-cased": 5_1_2, } def __lowerCAmelCase ( UpperCamelCase__ ) -> List[str]: __lowerCamelCase = collections.OrderedDict() with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as reader: __lowerCamelCase = reader.readlines() for index, token in enumerate(UpperCamelCase__ ): __lowerCamelCase = token.rstrip('''\n''' ) __lowerCamelCase = index return vocab class a__ ( UpperCAmelCase__ ): lowerCamelCase : Optional[Any] =VOCAB_FILES_NAMES lowerCamelCase : Any =PRETRAINED_VOCAB_FILES_MAP lowerCamelCase : Optional[int] =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES lowerCamelCase : Union[str, Any] =["input_ids", "attention_mask"] def __init__( self : int , a : List[str] , a : Optional[int]="[SEP]" , a : int="[SEP]" , a : str="[SEP]" , a : List[Any]="[UNK]" , a : List[Any]="[PAD]" , a : str="[CLS]" , a : List[str]="[MASK]" , a : Optional[Dict[str, Any]] = None , **a : str , ): """simple docstring""" __lowerCamelCase = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( bos_token=a , eos_token=a , sep_token=a , unk_token=a , pad_token=a , cls_token=a , mask_token=a , sp_model_kwargs=self.sp_model_kwargs , **a , ) try: import sentencepiece as spm except ImportError: logger.warning( '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece''' ''' pip install sentencepiece''' ) raise __lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(str(a ) ) __lowerCamelCase = vocab_file # Original fairseq vocab and spm vocab must be "aligned": # Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ---- # fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-' # spm | '<unk>' | '<s>' | '</s>' | ',' | '.' 
| '▁' | 's' | '▁de' | '-' | '▁a' # put special tokens and [unused] tokens into the vocab __lowerCamelCase = {'''[PAD]''': 0, '''[CLS]''': 1, '''[SEP]''': 2, '''[UNK]''': 3, '''[MASK]''': 4} for i in range(10 ): __lowerCamelCase = f"""[unused{i}]""" __lowerCamelCase = 5 + i # The first "real" token "," has position 15 in the embedding vocab and position 3 in the spm vocab __lowerCamelCase = 12 __lowerCamelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()} for k in self.fairseq_tokens_to_ids.keys(): self.unique_no_split_tokens.append(a ) def __getstate__( self : List[str] ): """simple docstring""" __lowerCamelCase = self.__dict__.copy() __lowerCamelCase = None return state def __setstate__( self : int , a : List[Any] ): """simple docstring""" __lowerCamelCase = d try: import sentencepiece as spm except ImportError: logger.warning( '''You need to install SentencePiece to use XLMRobertaTokenizer: https://github.com/google/sentencepiece''' ''' pip install sentencepiece''' ) raise # for backward compatibility if not hasattr(self , '''sp_model_kwargs''' ): __lowerCamelCase = {} __lowerCamelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def SCREAMING_SNAKE_CASE__ ( self : str , a : List[int] , a : Optional[List[int]] = None , a : bool = False ): """simple docstring""" if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=a , token_ids_a=a , already_has_special_tokens=a ) if token_ids_a is None: return ([0] * len(a )) + [1] return ([0] * len(a )) + [1] + ([0] * len(a )) + [1] def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : List[int] , a : Optional[List[int]] = None ): """simple docstring""" __lowerCamelCase = [self.sep_token_id] if token_ids_a is None: return len(token_ids_a + sep ) * [0] return len(token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" return len(self.sp_model ) + self.fairseq_offset def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ): """simple docstring""" __lowerCamelCase = {self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : str ): """simple docstring""" return self.sp_model.encode(a , out_type=a ) def SCREAMING_SNAKE_CASE__ ( self : Dict , a : int ): """simple docstring""" if token in self.fairseq_tokens_to_ids: return self.fairseq_tokens_to_ids[token] __lowerCamelCase = self.sp_model.PieceToId(a ) # Need to return unknown token if the SP model returned 0 return spm_id + self.fairseq_offset if spm_id else self.unk_token_id def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : Union[str, Any] ): """simple docstring""" if index in self.fairseq_ids_to_tokens: return self.fairseq_ids_to_tokens[index] return self.sp_model.IdToPiece(index - self.fairseq_offset ) def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : Tuple ): """simple docstring""" __lowerCamelCase = ''''''.join(a ).replace(a , ''' ''' ).strip() return out_string def SCREAMING_SNAKE_CASE__ ( self : int , a : str , a : Optional[str] = None ): """simple docstring""" if not os.path.isdir(a ): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" ) return __lowerCamelCase = os.path.join( a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(a ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , 
a ) elif not os.path.isfile(self.vocab_file ): with open(a , '''wb''' ) as fi: __lowerCamelCase = self.sp_model.serialized_model_proto() fi.write(a ) return (out_vocab_file,) def SCREAMING_SNAKE_CASE__ ( self : Any , a : List[int] , a : Optional[List[int]] = None ): """simple docstring""" if token_ids_a is None: return token_ids_a + [self.sep_token_id] __lowerCamelCase = [self.sep_token_id] return token_ids_a + sep + token_ids_a + sep
237
0
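The memoised solver in the record above jumps through the digit-sum recurrence a(1) = 1, a(n + 1) = a(n) + digitsum(a(n)) in large cached steps. A naive reference implementation is handy for sanity-checking small n:

def digit_sum(n: int) -> int:
    return sum(int(d) for d in str(n))


def a(n: int) -> int:
    # Direct, O(n) evaluation of the same recurrence the solver accelerates.
    term = 1
    for _ in range(n - 1):
        term += digit_sum(term)
    return term


assert [a(i) for i in range(1, 7)] == [1, 2, 4, 8, 16, 23]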
"""simple docstring""" import numpy as np from transformers import BatchFeature from transformers.testing_utils import require_tf, require_torch from .test_feature_extraction_common import FeatureExtractionSavingTestMixin class a ( a_ ): # to overwrite at feature extractactor specific tests UpperCAmelCase_ : Optional[int] =None UpperCAmelCase_ : List[Any] =None @property def UpperCamelCase_ ( self ): return self.feat_extract_tester.prepare_feat_extract_dict() def UpperCamelCase_ ( self ): lowercase = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(_lowerCamelCase , 'feature_size' ) ) self.assertTrue(hasattr(_lowerCamelCase , 'sampling_rate' ) ) self.assertTrue(hasattr(_lowerCamelCase , 'padding_value' ) ) def UpperCamelCase_ ( self ): lowercase = self.feat_extract_tester.prepare_inputs_for_common() lowercase = self.feature_extraction_class(**self.feat_extract_dict ) lowercase = feat_extract.model_input_names[0] lowercase = BatchFeature({input_name: speech_inputs} ) self.assertTrue(all(len(_lowerCamelCase ) == len(_lowerCamelCase ) for x, y in zip(_lowerCamelCase , processed_features[input_name] ) ) ) lowercase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCamelCase ) lowercase = BatchFeature({input_name: speech_inputs} , tensor_type='np' ) lowercase = processed_features[input_name] if len(batch_features_input.shape ) < 3: lowercase = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_torch def UpperCamelCase_ ( self ): lowercase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCamelCase ) lowercase = self.feature_extraction_class(**self.feat_extract_dict ) lowercase = feat_extract.model_input_names[0] lowercase = BatchFeature({input_name: speech_inputs} , tensor_type='pt' ) lowercase = processed_features[input_name] if len(batch_features_input.shape ) < 3: lowercase = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) @require_tf def UpperCamelCase_ ( self ): lowercase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=_lowerCamelCase ) lowercase = self.feature_extraction_class(**self.feat_extract_dict ) lowercase = feat_extract.model_input_names[0] lowercase = BatchFeature({input_name: speech_inputs} , tensor_type='tf' ) lowercase = processed_features[input_name] if len(batch_features_input.shape ) < 3: lowercase = batch_features_input[:, :, None] self.assertTrue( batch_features_input.shape == (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) ) def UpperCamelCase_ ( self , _lowerCamelCase=False ): def _inputs_have_equal_length(_lowerCamelCase ): lowercase = len(input[0] ) for input_slice in input[1:]: if len(_lowerCamelCase ) != length: return False return True def _inputs_are_equal(_lowerCamelCase , _lowerCamelCase ): if len(_lowerCamelCase ) != len(_lowerCamelCase ): return False for input_slice_a, input_slice_a in zip(_lowerCamelCase , _lowerCamelCase ): if not np.allclose(np.asarray(_lowerCamelCase ) , np.asarray(_lowerCamelCase ) , atol=1e-3 ): return False return True lowercase = self.feature_extraction_class(**self.feat_extract_dict ) lowercase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCamelCase ) lowercase = feat_extract.model_input_names[0] 
lowercase = BatchFeature({input_name: speech_inputs} ) lowercase = self.feat_extract_tester.seq_length_diff lowercase = self.feat_extract_tester.max_seq_length + pad_diff lowercase = self.feat_extract_tester.min_seq_length lowercase = self.feat_extract_tester.batch_size lowercase = self.feat_extract_tester.feature_size # test padding for List[int] + numpy lowercase = feat_extract.pad(_lowerCamelCase , padding=_lowerCamelCase ) lowercase = input_a[input_name] lowercase = feat_extract.pad(_lowerCamelCase , padding='longest' ) lowercase = input_a[input_name] lowercase = feat_extract.pad(_lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[-1] ) ) lowercase = input_a[input_name] lowercase = feat_extract.pad(_lowerCamelCase , padding='longest' , return_tensors='np' ) lowercase = input_a[input_name] # max_length parameter has to be provided when setting `padding="max_length"` with self.assertRaises(_lowerCamelCase ): feat_extract.pad(_lowerCamelCase , padding='max_length' )[input_name] lowercase = feat_extract.pad( _lowerCamelCase , padding='max_length' , max_length=_lowerCamelCase , return_tensors='np' ) lowercase = input_a[input_name] self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertTrue(_inputs_are_equal(_lowerCamelCase , _lowerCamelCase ) ) self.assertTrue(len(input_a[0] ) == pad_min_length ) self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff ) self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) ) self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size ) # test padding for `pad_to_multiple_of` for List[int] + numpy lowercase = feat_extract.pad(_lowerCamelCase , pad_to_multiple_of=1_0 ) lowercase = input_a[input_name] lowercase = feat_extract.pad(_lowerCamelCase , padding='longest' , pad_to_multiple_of=1_0 ) lowercase = input_a[input_name] lowercase = feat_extract.pad( _lowerCamelCase , padding='max_length' , pad_to_multiple_of=1_0 , max_length=_lowerCamelCase ) lowercase = input_a[input_name] lowercase = feat_extract.pad( _lowerCamelCase , padding='max_length' , pad_to_multiple_of=1_0 , max_length=_lowerCamelCase , return_tensors='np' , ) lowercase = input_a[input_name] self.assertTrue(all(len(_lowerCamelCase ) % 1_0 == 0 for x in input_a ) ) self.assertTrue(_inputs_are_equal(_lowerCamelCase , _lowerCamelCase ) ) lowercase = pad_max_length if pad_max_length % 1_0 == 0 else (pad_max_length // 1_0 + 1) * 1_0 self.assertTrue(all(len(_lowerCamelCase ) == expected_mult_pad_length for x in input_a ) ) self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) ) if feature_size > 1: self.assertTrue(input_a.shape[2] == feature_size ) # Check padding value is correct lowercase = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum() self.assertTrue( abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 ) self.assertTrue( abs( np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) ) < 1e-3 ) self.assertTrue( abs( np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum() - padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) ) < 1e-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * 
(pad_max_length - pad_min_length) ) < 1e-3 ) self.assertTrue( abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) ) < 1e-3 ) def UpperCamelCase_ ( self , _lowerCamelCase=False ): def _inputs_have_equal_length(_lowerCamelCase ): lowercase = len(input[0] ) for input_slice in input[1:]: if len(_lowerCamelCase ) != length: return False return True def _inputs_are_equal(_lowerCamelCase , _lowerCamelCase ): if len(_lowerCamelCase ) != len(_lowerCamelCase ): return False for input_slice_a, input_slice_a in zip(_lowerCamelCase , _lowerCamelCase ): if not np.allclose(np.asarray(_lowerCamelCase ) , np.asarray(_lowerCamelCase ) , atol=1e-3 ): return False return True lowercase = self.feature_extraction_class(**self.feat_extract_dict ) lowercase = self.feat_extract_tester.prepare_inputs_for_common(numpify=_lowerCamelCase ) lowercase = feat_extract.model_input_names[0] lowercase = BatchFeature({input_name: speech_inputs} ) # truncate to smallest lowercase = feat_extract.pad( _lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , truncation=_lowerCamelCase ) lowercase = input_a[input_name] lowercase = feat_extract.pad(_lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[0] ) ) lowercase = input_a[input_name] self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) ) # truncate to smallest with np lowercase = feat_extract.pad( _lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' , truncation=_lowerCamelCase , ) lowercase = input_a[input_name] lowercase = feat_extract.pad( _lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , return_tensors='np' ) lowercase = input_a[input_name] self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) ) # truncate to middle lowercase = feat_extract.pad( _lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=_lowerCamelCase , return_tensors='np' , ) lowercase = input_a[input_name] lowercase = feat_extract.pad( _lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , truncation=_lowerCamelCase ) lowercase = input_a[input_name] lowercase = feat_extract.pad( _lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[1] ) , return_tensors='np' ) lowercase = input_a[input_name] self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) ) self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertTrue(_inputs_are_equal(_lowerCamelCase , _lowerCamelCase ) ) # since truncation forces padding to be smaller than longest input # function can't return `np.ndarray`, but has to return list self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) ) # padding has to be max_length when setting `truncation=True` with self.assertRaises(_lowerCamelCase ): feat_extract.pad(_lowerCamelCase , truncation=_lowerCamelCase )[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(_lowerCamelCase ): feat_extract.pad(_lowerCamelCase , padding='longest' , truncation=_lowerCamelCase 
)[input_name] # padding has to be max_length when setting `truncation=True` with self.assertRaises(_lowerCamelCase ): feat_extract.pad(_lowerCamelCase , padding='longest' , truncation=_lowerCamelCase )[input_name] # max_length parameter has to be provided when setting `truncation=True` and padding="max_length" with self.assertRaises(_lowerCamelCase ): feat_extract.pad(_lowerCamelCase , padding='max_length' , truncation=_lowerCamelCase )[input_name] # test truncation for `pad_to_multiple_of` for List[int] + numpy lowercase = 1_2 lowercase = feat_extract.pad( _lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCamelCase , truncation=_lowerCamelCase , ) lowercase = input_a[input_name] lowercase = feat_extract.pad( _lowerCamelCase , padding='max_length' , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=_lowerCamelCase , ) lowercase = input_a[input_name] # retrieve expected_length as multiple of pad_to_multiple_of lowercase = len(speech_inputs[0] ) if expected_length % pad_to_multiple_of != 0: lowercase = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of self.assertTrue(len(input_a[0] ) == expected_length ) self.assertTrue(_inputs_have_equal_length(_lowerCamelCase ) ) self.assertFalse(_inputs_have_equal_length(_lowerCamelCase ) ) def UpperCamelCase_ ( self ): self._check_padding(numpify=_lowerCamelCase ) def UpperCamelCase_ ( self ): self._check_padding(numpify=_lowerCamelCase ) def UpperCamelCase_ ( self ): self._check_truncation(numpify=_lowerCamelCase ) def UpperCamelCase_ ( self ): self._check_truncation(numpify=_lowerCamelCase ) @require_torch def UpperCamelCase_ ( self ): lowercase = self.feature_extraction_class(**self.feat_extract_dict ) lowercase = self.feat_extract_tester.prepare_inputs_for_common() lowercase = feat_extract.model_input_names[0] lowercase = BatchFeature({input_name: speech_inputs} ) lowercase = feat_extract.pad(_lowerCamelCase , padding='longest' , return_tensors='np' )[input_name] lowercase = feat_extract.pad(_lowerCamelCase , padding='longest' , return_tensors='pt' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 ) @require_tf def UpperCamelCase_ ( self ): lowercase = self.feature_extraction_class(**self.feat_extract_dict ) lowercase = self.feat_extract_tester.prepare_inputs_for_common() lowercase = feat_extract.model_input_names[0] lowercase = BatchFeature({input_name: speech_inputs} ) lowercase = feat_extract.pad(_lowerCamelCase , padding='longest' , return_tensors='np' )[input_name] lowercase = feat_extract.pad(_lowerCamelCase , padding='longest' , return_tensors='tf' )[input_name] self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 ) def UpperCamelCase_ ( self ): lowercase = self.feat_extract_dict lowercase = True lowercase = self.feature_extraction_class(**_lowerCamelCase ) lowercase = self.feat_extract_tester.prepare_inputs_for_common() lowercase = [len(_lowerCamelCase ) for x in speech_inputs] lowercase = feat_extract.model_input_names[0] lowercase = BatchFeature({input_name: speech_inputs} ) lowercase = feat_extract.pad(_lowerCamelCase , padding='longest' , return_tensors='np' ) self.assertIn('attention_mask' , _lowerCamelCase ) self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) ) self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , _lowerCamelCase ) def UpperCamelCase_ ( self ): lowercase 
= self.feat_extract_dict lowercase = True lowercase = self.feature_extraction_class(**_lowerCamelCase ) lowercase = self.feat_extract_tester.prepare_inputs_for_common() lowercase = [len(_lowerCamelCase ) for x in speech_inputs] lowercase = feat_extract.model_input_names[0] lowercase = BatchFeature({input_name: speech_inputs} ) lowercase = min(_lowerCamelCase ) lowercase = feat_extract.pad( _lowerCamelCase , padding='max_length' , max_length=_lowerCamelCase , truncation=_lowerCamelCase , return_tensors='np' ) self.assertIn('attention_mask' , _lowerCamelCase ) self.assertListEqual( list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] ) self.assertListEqual( processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
220
"""simple docstring""" import requests from bsa import BeautifulSoup def _SCREAMING_SNAKE_CASE ( __snake_case : str = "AAPL" ): '''simple docstring''' lowercase = f'https://in.finance.yahoo.com/quote/{symbol}?s={symbol}' lowercase = BeautifulSoup(requests.get(__snake_case ).text , 'html.parser' ) lowercase = 'My(6px) Pos(r) smartphone_Mt(6px)' return soup.find('div' , class_=class_ ).find('span' ).text if __name__ == "__main__": for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split(): print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
220
1
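The scraper in the record above depends entirely on whatever CSS class string Yahoo Finance happens to render, so the selector is a moving target. A defensive sketch of the same find-div-then-span pattern that fails loudly instead of raising AttributeError when the layout changes:

import requests
from bs4 import BeautifulSoup


def first_span_text(url: str, div_class: str) -> str:
    # div_class is an assumption about the page layout and may change.
    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
    div = soup.find("div", class_=div_class)
    if div is None or div.find("span") is None:
        raise ValueError("page layout changed: selector did not match")
    return div.find("span").text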
import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase_ ( a__ , a__ , unittest.TestCase ): UpperCAmelCase__ : List[Any] = IFPipeline UpperCAmelCase__ : List[Any] = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"} UpperCAmelCase__ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS UpperCAmelCase__ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"latents"} def snake_case_ ( self ) -> str: return self._get_dummy_components() def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_=0 ) -> Any: if str(SCREAMING_SNAKE_CASE_ ).startswith('mps' ): UpperCamelCase : str = torch.manual_seed(SCREAMING_SNAKE_CASE_ ) else: UpperCamelCase : Union[str, Any] = torch.Generator(device=SCREAMING_SNAKE_CASE_ ).manual_seed(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[Any] = { 'prompt': 'A painting of a squirrel eating a burger', 'generator': generator, 'num_inference_steps': 2, 'output_type': 'numpy', } return inputs def snake_case_ ( self ) -> Dict: self._test_save_load_optional_components() @unittest.skipIf(torch_device != 'cuda', reason='float16 requires CUDA' ) def snake_case_ ( self ) -> List[str]: # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def snake_case_ ( self ) -> str: self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def snake_case_ ( self ) -> Optional[int]: self._test_save_load_local() def snake_case_ ( self ) -> Any: self._test_inference_batch_single_identical( expected_max_diff=1e-2, ) @unittest.skipIf( torch_device != 'cuda' or not is_xformers_available(), reason='XFormers attention is only available with CUDA and `xformers` installed', ) def snake_case_ ( self ) -> Dict: self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class lowerCAmelCase_ ( unittest.TestCase ): def snake_case_ ( self ) -> str: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def snake_case_ ( self ) -> int: # if UpperCamelCase : Optional[Any] = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0', variant='fp16', torch_dtype=torch.floataa ) UpperCamelCase : List[Any] = IFSuperResolutionPipeline.from_pretrained( 'DeepFloyd/IF-II-L-v1.0', variant='fp16', torch_dtype=torch.floataa, text_encoder=SCREAMING_SNAKE_CASE_, tokenizer=SCREAMING_SNAKE_CASE_ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to('cuda' ) UpperCamelCase , UpperCamelCase : List[Any] = pipe_a.encode_prompt('anime turtle', device='cuda' ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() UpperCamelCase : Union[str, Any] = None UpperCamelCase : List[Any] = None pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) 
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img UpperCamelCase : Optional[int] = IFImgaImgPipeline(**pipe_a.components ) UpperCamelCase : str = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting UpperCamelCase : str = IFInpaintingPipeline(**pipe_a.components ) UpperCamelCase : Optional[Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]: # pipeline 1 _start_torch_memory_measurement() UpperCamelCase : Union[str, Any] = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : Tuple = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', ) UpperCamelCase : str = output.images[0] assert image.shape == (64, 64, 3) UpperCamelCase : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 13 * 10**9 UpperCamelCase : List[str] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # pipeline 2 _start_torch_memory_measurement() UpperCamelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : List[Any] = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : str = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', ) UpperCamelCase : Optional[Any] = output.images[0] assert image.shape == (256, 256, 3) UpperCamelCase : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 UpperCamelCase : Optional[Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> List[Any]: # pipeline 1 _start_torch_memory_measurement() UpperCamelCase : Optional[Any] = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Optional[Any] = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : List[str] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', ) UpperCamelCase : Union[str, Any] = 
output.images[0] assert image.shape == (64, 64, 3) UpperCamelCase : str = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 UpperCamelCase : str = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # pipeline 2 _start_torch_memory_measurement() UpperCamelCase : int = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : Union[str, Any] = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Union[str, Any] = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', ) UpperCamelCase : Any = output.images[0] assert image.shape == (256, 256, 3) UpperCamelCase : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 UpperCamelCase : Optional[int] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def snake_case_ ( self, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) -> Optional[int]: # pipeline 1 _start_torch_memory_measurement() UpperCamelCase : List[str] = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : int = floats_tensor((1, 3, 64, 64), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : Dict = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, num_inference_steps=2, generator=SCREAMING_SNAKE_CASE_, output_type='np', ) UpperCamelCase : Any = output.images[0] assert image.shape == (64, 64, 3) UpperCamelCase : List[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 10 * 10**9 UpperCamelCase : Optional[Any] = load_numpy( 'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) # pipeline 2 _start_torch_memory_measurement() UpperCamelCase : int = torch.Generator(device='cpu' ).manual_seed(0 ) UpperCamelCase : int = floats_tensor((1, 3, 64, 64), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : List[str] = floats_tensor((1, 3, 256, 256), rng=random.Random(0 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : int = floats_tensor((1, 3, 256, 256), rng=random.Random(1 ) ).to(SCREAMING_SNAKE_CASE_ ) UpperCamelCase : Tuple = pipe_a( prompt_embeds=SCREAMING_SNAKE_CASE_, negative_prompt_embeds=SCREAMING_SNAKE_CASE_, image=SCREAMING_SNAKE_CASE_, mask_image=SCREAMING_SNAKE_CASE_, original_image=SCREAMING_SNAKE_CASE_, generator=SCREAMING_SNAKE_CASE_, num_inference_steps=2, output_type='np', ) UpperCamelCase : Tuple = output.images[0] assert image.shape == (256, 256, 3) UpperCamelCase : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 10**9 UpperCamelCase : Tuple = load_numpy( 
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' ) assert_mean_pixel_difference(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) def UpperCamelCase ( ) -> Union[str, Any]: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
103
from graphs.minimum_spanning_tree_kruskal import kruskal


def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
        [0, 1, 4],
        [0, 7, 8],
        [1, 2, 8],
        [7, 8, 7],
        [7, 6, 1],
        [2, 8, 2],
        [8, 6, 6],
        [2, 3, 7],
        [2, 5, 4],
        [6, 5, 2],
        [3, 5, 14],
        [3, 4, 9],
        [5, 4, 10],
        [1, 7, 11],
    ]
    result = kruskal(num_nodes, edges)
    expected = [
        [7, 6, 1],
        [2, 8, 2],
        [6, 5, 2],
        [0, 1, 4],
        [2, 5, 4],
        [2, 3, 7],
        [0, 7, 8],
        [3, 4, 9],
    ]
    assert sorted(expected) == sorted(result)
103
1
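The test in the record above imports graphs.minimum_spanning_tree_kruskal, which is not part of this file. A minimal union-find Kruskal with the same assumed (num_nodes, [u, v, w]) interface, offered only as a compatible sketch:

def kruskal(num_nodes: int, edges: list[list[int]]) -> list[list[int]]:
    parent = list(range(num_nodes))

    def find(u: int) -> int:
        while parent[u] != u:
            parent[u] = parent[parent[u]]  # path halving
            u = parent[u]
        return u

    mst: list[list[int]] = []
    for u, v, w in sorted(edges, key=lambda e: e[2]):  # cheapest edges first
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # edge joins two components: keep it
            parent[root_u] = root_v
            mst.append([u, v, w])
    return mst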
'''simple docstring''' import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class _snake_case : def __init__( self : Dict ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : str=13 ,SCREAMING_SNAKE_CASE__ : List[str]=7 ,SCREAMING_SNAKE_CASE__ : Optional[int]=True ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=True ,SCREAMING_SNAKE_CASE__ : Tuple=True ,SCREAMING_SNAKE_CASE__ : List[str]=True ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=99 ,SCREAMING_SNAKE_CASE__ : Any=64 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=32 ,SCREAMING_SNAKE_CASE__ : Dict=5 ,SCREAMING_SNAKE_CASE__ : int=4 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=37 ,SCREAMING_SNAKE_CASE__ : Tuple="gelu" ,SCREAMING_SNAKE_CASE__ : str=0.1 ,SCREAMING_SNAKE_CASE__ : List[Any]=0.1 ,SCREAMING_SNAKE_CASE__ : Union[str, Any]=512 ,SCREAMING_SNAKE_CASE__ : List[Any]=16 ,SCREAMING_SNAKE_CASE__ : Optional[Any]=2 ,SCREAMING_SNAKE_CASE__ : int=0.02 ,SCREAMING_SNAKE_CASE__ : str=3 ,SCREAMING_SNAKE_CASE__ : Dict=4 ,SCREAMING_SNAKE_CASE__ : Dict=None ,): SCREAMING_SNAKE_CASE:Dict = parent SCREAMING_SNAKE_CASE:List[str] = batch_size SCREAMING_SNAKE_CASE:str = seq_length SCREAMING_SNAKE_CASE:List[str] = is_training SCREAMING_SNAKE_CASE:List[str] = use_input_mask SCREAMING_SNAKE_CASE:Optional[int] = use_token_type_ids SCREAMING_SNAKE_CASE:List[str] = use_labels SCREAMING_SNAKE_CASE:Tuple = vocab_size SCREAMING_SNAKE_CASE:Any = hidden_size SCREAMING_SNAKE_CASE:List[Any] = embedding_size SCREAMING_SNAKE_CASE:Optional[Any] = num_hidden_layers SCREAMING_SNAKE_CASE:List[Any] = num_attention_heads SCREAMING_SNAKE_CASE:List[Any] = intermediate_size SCREAMING_SNAKE_CASE:Dict = hidden_act SCREAMING_SNAKE_CASE:Any = hidden_dropout_prob SCREAMING_SNAKE_CASE:int = attention_probs_dropout_prob SCREAMING_SNAKE_CASE:List[Any] = max_position_embeddings SCREAMING_SNAKE_CASE:Dict = type_vocab_size SCREAMING_SNAKE_CASE:List[Any] = type_sequence_label_size SCREAMING_SNAKE_CASE:str = initializer_range SCREAMING_SNAKE_CASE:Union[str, Any] = num_labels SCREAMING_SNAKE_CASE:Dict = num_choices SCREAMING_SNAKE_CASE:str = scope def __UpperCamelCase ( self : Tuple ): SCREAMING_SNAKE_CASE:List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) SCREAMING_SNAKE_CASE:Union[str, Any] = None if self.use_input_mask: SCREAMING_SNAKE_CASE:Optional[int] = random_attention_mask([self.batch_size, self.seq_length] ) SCREAMING_SNAKE_CASE:str = None if self.use_token_type_ids: SCREAMING_SNAKE_CASE:Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size ) SCREAMING_SNAKE_CASE:Union[str, Any] = None SCREAMING_SNAKE_CASE:List[str] = None SCREAMING_SNAKE_CASE:str = None if self.use_labels: SCREAMING_SNAKE_CASE:str = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) SCREAMING_SNAKE_CASE:Any = ids_tensor([self.batch_size, 
self.seq_length] ,self.num_labels ) SCREAMING_SNAKE_CASE:Optional[Any] = ids_tensor([self.batch_size] ,self.num_choices ) SCREAMING_SNAKE_CASE:int = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCamelCase ( self : Tuple ): return MobileBertConfig( vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,embedding_size=self.embedding_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=SCREAMING_SNAKE_CASE__ ,initializer_range=self.initializer_range ,) def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[int] ): SCREAMING_SNAKE_CASE:Union[str, Any] = MobileBertModel(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() SCREAMING_SNAKE_CASE:List[str] = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE:Optional[int] = model(SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE:Any = model(SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape ,(self.batch_size, self.hidden_size) ) def __UpperCamelCase ( self : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Optional[int] ): SCREAMING_SNAKE_CASE:Optional[Any] = MobileBertForMaskedLM(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() SCREAMING_SNAKE_CASE:Optional[Any] = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCamelCase ( self : Any ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Dict ): SCREAMING_SNAKE_CASE:str = MobileBertForNextSentencePrediction(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() SCREAMING_SNAKE_CASE:Optional[Any] = model( SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, 2) ) def __UpperCamelCase ( self : int ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : Dict ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : int ): SCREAMING_SNAKE_CASE:str = MobileBertForPreTraining(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() 
SCREAMING_SNAKE_CASE:int = model( SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ ,next_sentence_label=SCREAMING_SNAKE_CASE__ ,) self.parent.assertEqual(result.prediction_logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape ,(self.batch_size, 2) ) def __UpperCamelCase ( self : Tuple ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : Tuple ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : str ): SCREAMING_SNAKE_CASE:Optional[Any] = MobileBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() SCREAMING_SNAKE_CASE:str = model( SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,start_positions=SCREAMING_SNAKE_CASE__ ,end_positions=SCREAMING_SNAKE_CASE__ ,) self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) ) def __UpperCamelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ): SCREAMING_SNAKE_CASE:Dict = self.num_labels SCREAMING_SNAKE_CASE:List[Any] = MobileBertForSequenceClassification(SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() SCREAMING_SNAKE_CASE:Optional[int] = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) ) def __UpperCamelCase ( self : Optional[int] ,SCREAMING_SNAKE_CASE__ : List[str] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[int] ,SCREAMING_SNAKE_CASE__ : int ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : str ): SCREAMING_SNAKE_CASE:Optional[Any] = self.num_labels SCREAMING_SNAKE_CASE:str = MobileBertForTokenClassification(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() SCREAMING_SNAKE_CASE:int = model(SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ ) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) ) def __UpperCamelCase ( self : List[str] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Any ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Optional[Any] ,SCREAMING_SNAKE_CASE__ : Tuple ): SCREAMING_SNAKE_CASE:List[str] = self.num_choices SCREAMING_SNAKE_CASE:List[str] = MobileBertForMultipleChoice(config=SCREAMING_SNAKE_CASE__ ) model.to(SCREAMING_SNAKE_CASE__ ) model.eval() SCREAMING_SNAKE_CASE:Optional[int] = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() SCREAMING_SNAKE_CASE:Dict = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() SCREAMING_SNAKE_CASE:str = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous() SCREAMING_SNAKE_CASE:Union[str, Any] = 
model( SCREAMING_SNAKE_CASE__ ,attention_mask=SCREAMING_SNAKE_CASE__ ,token_type_ids=SCREAMING_SNAKE_CASE__ ,labels=SCREAMING_SNAKE_CASE__ ,) self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) ) def __UpperCamelCase ( self : Optional[Any] ): SCREAMING_SNAKE_CASE:Any = self.prepare_config_and_inputs() ( ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ( SCREAMING_SNAKE_CASE ) , ):List[str] = config_and_inputs SCREAMING_SNAKE_CASE:Dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class _snake_case ( _a , _a , unittest.TestCase ): _A : Optional[Any] = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) _A : int = ( { '''feature-extraction''': MobileBertModel, '''fill-mask''': MobileBertForMaskedLM, '''question-answering''': MobileBertForQuestionAnswering, '''text-classification''': MobileBertForSequenceClassification, '''token-classification''': MobileBertForTokenClassification, '''zero-shot''': MobileBertForSequenceClassification, } if is_torch_available() else {} ) _A : int = True def __UpperCamelCase ( self : List[Any] ,SCREAMING_SNAKE_CASE__ : Union[str, Any] ,SCREAMING_SNAKE_CASE__ : List[Any] ,SCREAMING_SNAKE_CASE__ : Dict=False ): SCREAMING_SNAKE_CASE:Any = super()._prepare_for_class(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,return_labels=SCREAMING_SNAKE_CASE__ ) if return_labels: if model_class in get_values(SCREAMING_SNAKE_CASE__ ): SCREAMING_SNAKE_CASE:Tuple = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) ,dtype=torch.long ,device=SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE:List[str] = torch.zeros( self.model_tester.batch_size ,dtype=torch.long ,device=SCREAMING_SNAKE_CASE__ ) return inputs_dict def __UpperCamelCase ( self : List[Any] ): SCREAMING_SNAKE_CASE:Tuple = MobileBertModelTester(self ) SCREAMING_SNAKE_CASE:int = ConfigTester(self ,config_class=SCREAMING_SNAKE_CASE__ ,hidden_size=37 ) def __UpperCamelCase ( self : Optional[Any] ): self.config_tester.run_common_tests() def __UpperCamelCase ( self : List[str] ): SCREAMING_SNAKE_CASE:List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*SCREAMING_SNAKE_CASE__ ) def __UpperCamelCase ( self : List[str] ): SCREAMING_SNAKE_CASE:str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*SCREAMING_SNAKE_CASE__ ) def __UpperCamelCase ( self : Any ): SCREAMING_SNAKE_CASE:List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*SCREAMING_SNAKE_CASE__ ) def __UpperCamelCase ( self : int ): SCREAMING_SNAKE_CASE:Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*SCREAMING_SNAKE_CASE__ ) def __UpperCamelCase ( self : str ): SCREAMING_SNAKE_CASE:Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*SCREAMING_SNAKE_CASE__ ) def __UpperCamelCase ( self : Dict ): SCREAMING_SNAKE_CASE:int = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*SCREAMING_SNAKE_CASE__ ) def __UpperCamelCase ( self : List[Any] ): SCREAMING_SNAKE_CASE:str = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*SCREAMING_SNAKE_CASE__ ) def __UpperCamelCase ( self : Union[str, Any] ): SCREAMING_SNAKE_CASE:Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*SCREAMING_SNAKE_CASE__ ) def A_ ( snake_case ): return torch.tensor( snake_case , dtype=torch.long , device=snake_case , ) A_ = 1e-3 @require_torch @require_sentencepiece @require_tokenizers class _snake_case ( unittest.TestCase ): @slow def __UpperCamelCase ( self : Any ): SCREAMING_SNAKE_CASE:Union[str, Any] = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE:Any = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] ) with torch.no_grad(): SCREAMING_SNAKE_CASE:Union[str, Any] = model(SCREAMING_SNAKE_CASE__ )[0] SCREAMING_SNAKE_CASE:Dict = torch.Size((1, 9, 512) ) self.assertEqual(output.shape ,SCREAMING_SNAKE_CASE__ ) SCREAMING_SNAKE_CASE:Dict = torch.tensor( [ [ [-2.4_736_526e07, 8.2_691_656e04, 1.6_521_838e05], [-5.7_541_704e-01, 3.9_056_022e00, 4.4_011_507e00], [2.6_047_359e00, 1.5_677_652e00, -1.7_324_188e-01], ] ] ,device=SCREAMING_SNAKE_CASE__ ,) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE SCREAMING_SNAKE_CASE:Optional[int] = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) SCREAMING_SNAKE_CASE:List[Any] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
139
'''simple docstring''' import argparse import math import os from copy import deepcopy import torch from audio_diffusion.models import DiffusionAttnUnetaD from diffusion import sampling from torch import nn from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNetaDModel A_ = { "gwf-440k": { "url": "https://model-server.zqevans2.workers.dev/gwf-440k.ckpt", "sample_rate": 4_80_00, "sample_size": 6_55_36, }, "jmann-small-190k": { "url": "https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt", "sample_rate": 4_80_00, "sample_size": 6_55_36, }, "jmann-large-580k": { "url": "https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt", "sample_rate": 4_80_00, "sample_size": 13_10_72, }, "maestro-uncond-150k": { "url": "https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt", "sample_rate": 1_60_00, "sample_size": 6_55_36, }, "unlocked-uncond-250k": { "url": "https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt", "sample_rate": 1_60_00, "sample_size": 6_55_36, }, "honk-140k": { "url": "https://model-server.zqevans2.workers.dev/honk-140k.ckpt", "sample_rate": 1_60_00, "sample_size": 6_55_36, }, } def A_ ( snake_case , snake_case ): return torch.atana(snake_case , snake_case ) / math.pi * 2 def A_ ( snake_case ): SCREAMING_SNAKE_CASE:List[Any] = torch.sin(t * math.pi / 2 ) ** 2 SCREAMING_SNAKE_CASE:Any = (1 - sigma**2) ** 0.5 return alpha_sigma_to_t(snake_case , snake_case ) class _snake_case ( _a ): pass class _snake_case ( nn.Module ): def __init__( self : int ,SCREAMING_SNAKE_CASE__ : str ): super().__init__() SCREAMING_SNAKE_CASE:List[Any] = DiffusionAttnUnetaD(SCREAMING_SNAKE_CASE__ ,n_attn_layers=4 ) SCREAMING_SNAKE_CASE:List[str] = deepcopy(self.diffusion ) SCREAMING_SNAKE_CASE:Dict = torch.quasirandom.SobolEngine(1 ,scramble=SCREAMING_SNAKE_CASE__ ) def A_ ( snake_case ): SCREAMING_SNAKE_CASE:List[Any] = MODELS_MAP[model_name]["url"] os.system(F'''wget {url} ./''' ) return F'''./{model_name}.ckpt''' A_ = { "1": "resnets.0", "2": "attentions.0", "3": "resnets.1", "4": "attentions.1", "5": "resnets.2", "6": "attentions.2", } A_ = { "8": "resnets.0", "9": "attentions.0", "10": "resnets.1", "11": "attentions.1", "12": "resnets.2", "13": "attentions.2", } A_ = { "1": "resnets.0", "2": "attentions.0", "3": "resnets.1", "4": "attentions.1", "5": "resnets.2", "6": "attentions.2", "8": "resnets.3", "9": "attentions.3", "10": "resnets.4", "11": "attentions.4", "12": "resnets.5", "13": "attentions.5", } A_ = { "0": "resnets.0", "1": "resnets.1", "2": "resnets.2", "4": "resnets.0", "5": "resnets.1", "6": "resnets.2", } A_ = { "skip": "conv_skip", "main.0": "conv_1", "main.1": "group_norm_1", "main.3": "conv_2", "main.4": "group_norm_2", } A_ = { "norm": "group_norm", "qkv_proj": ["query", "key", "value"], "out_proj": ["proj_attn"], } def A_ ( snake_case ): if name.startswith("skip" ): return name.replace("skip" , RES_CONV_MAP["skip"] ) # name has to be of format main.{digit} if not name.startswith("main." ): raise ValueError(F'''ResConvBlock error with {name}''' ) return name.replace(name[:6] , RES_CONV_MAP[name[:6]] ) def A_ ( snake_case ): for key, value in ATTN_MAP.items(): if name.startswith(snake_case ) and not isinstance(snake_case , snake_case ): return name.replace(snake_case , snake_case ) elif name.startswith(snake_case ): return [name.replace(snake_case , snake_case ) for v in value] raise ValueError(F'''Attn error with {name}''' ) def A_ ( snake_case , snake_case=13 ): SCREAMING_SNAKE_CASE:Optional[Any] = input_string if string.split("." 
)[0] == "timestep_embed": return string.replace("timestep_embed" , "time_proj" ) SCREAMING_SNAKE_CASE:List[str] = 0 if string.startswith("net.3." ): depth += 1 SCREAMING_SNAKE_CASE:Union[str, Any] = string[6:] elif string.startswith("net." ): SCREAMING_SNAKE_CASE:int = string[4:] while string.startswith("main.7." ): depth += 1 SCREAMING_SNAKE_CASE:Union[str, Any] = string[7:] if string.startswith("main." ): SCREAMING_SNAKE_CASE:str = string[5:] # mid block if string[:2].isdigit(): SCREAMING_SNAKE_CASE:Tuple = string[:2] SCREAMING_SNAKE_CASE:Optional[Any] = string[2:] else: SCREAMING_SNAKE_CASE:Optional[Any] = string[0] SCREAMING_SNAKE_CASE:Optional[Any] = string[1:] if depth == max_depth: SCREAMING_SNAKE_CASE:Any = MID_NUM_TO_LAYER[layer_num] SCREAMING_SNAKE_CASE:List[str] = "mid_block" elif depth > 0 and int(snake_case ) < 7: SCREAMING_SNAKE_CASE:Union[str, Any] = DOWN_NUM_TO_LAYER[layer_num] SCREAMING_SNAKE_CASE:Dict = F'''down_blocks.{depth}''' elif depth > 0 and int(snake_case ) > 7: SCREAMING_SNAKE_CASE:Any = UP_NUM_TO_LAYER[layer_num] SCREAMING_SNAKE_CASE:Union[str, Any] = F'''up_blocks.{max_depth - depth - 1}''' elif depth == 0: SCREAMING_SNAKE_CASE:Optional[int] = DEPTH_0_TO_LAYER[layer_num] SCREAMING_SNAKE_CASE:Any = F'''up_blocks.{max_depth - 1}''' if int(snake_case ) > 3 else "down_blocks.0" if not string_left.startswith("." ): raise ValueError(F'''Naming error with {input_string} and string_left: {string_left}.''' ) SCREAMING_SNAKE_CASE:List[Any] = string_left[1:] if "resnets" in new_layer: SCREAMING_SNAKE_CASE:List[str] = convert_resconv_naming(snake_case ) elif "attentions" in new_layer: SCREAMING_SNAKE_CASE:List[Any] = convert_attn_naming(snake_case ) SCREAMING_SNAKE_CASE:List[Any] = new_string_left if not isinstance(snake_case , snake_case ): SCREAMING_SNAKE_CASE:Tuple = prefix + "." + new_layer + "." + string_left else: SCREAMING_SNAKE_CASE:int = [prefix + "." + new_layer + "." + s for s in string_left] return new_string def A_ ( snake_case ): SCREAMING_SNAKE_CASE:int = {} for k, v in state_dict.items(): if k.endswith("kernel" ): # up- and downsample layers, don't have trainable weights continue SCREAMING_SNAKE_CASE:str = rename(snake_case ) # check if we need to transform from Conv => Linear for attention if isinstance(snake_case , snake_case ): SCREAMING_SNAKE_CASE:Optional[int] = transform_conv_attns(snake_case , snake_case , snake_case ) else: SCREAMING_SNAKE_CASE:Optional[int] = v return new_state_dict def A_ ( snake_case , snake_case , snake_case ): if len(snake_case ) == 1: if len(v.shape ) == 3: # weight SCREAMING_SNAKE_CASE:List[str] = v[:, :, 0] else: # bias SCREAMING_SNAKE_CASE:Optional[Any] = v else: # qkv matrices SCREAMING_SNAKE_CASE:Optional[int] = v.shape[0] SCREAMING_SNAKE_CASE:Optional[Any] = trippled_shape // 3 for i in range(3 ): if len(v.shape ) == 3: SCREAMING_SNAKE_CASE:Union[str, Any] = v[i * single_shape : (i + 1) * single_shape, :, 0] else: SCREAMING_SNAKE_CASE:List[Any] = v[i * single_shape : (i + 1) * single_shape] return new_state_dict def A_ ( snake_case ): SCREAMING_SNAKE_CASE:Union[str, Any] = torch.device("cuda" if torch.cuda.is_available() else "cpu" ) SCREAMING_SNAKE_CASE:List[str] = args.model_path.split("/" )[-1].split("." 
)[0] if not os.path.isfile(args.model_path ): assert ( model_name == args.model_path ), F'''Make sure to provide one of the official model names {MODELS_MAP.keys()}''' SCREAMING_SNAKE_CASE:List[str] = download(snake_case ) SCREAMING_SNAKE_CASE:List[str] = MODELS_MAP[model_name]["sample_rate"] SCREAMING_SNAKE_CASE:Tuple = MODELS_MAP[model_name]["sample_size"] SCREAMING_SNAKE_CASE:Union[str, Any] = Object() SCREAMING_SNAKE_CASE:int = sample_size SCREAMING_SNAKE_CASE:Any = sample_rate SCREAMING_SNAKE_CASE:List[str] = 0 SCREAMING_SNAKE_CASE:Optional[Any] = UNetaDModel(sample_size=snake_case , sample_rate=snake_case ) SCREAMING_SNAKE_CASE:Optional[Any] = diffusers_model.state_dict() SCREAMING_SNAKE_CASE:Optional[Any] = DiffusionUncond(snake_case ) orig_model.load_state_dict(torch.load(args.model_path , map_location=snake_case )["state_dict"] ) SCREAMING_SNAKE_CASE:Union[str, Any] = orig_model.diffusion_ema.eval() SCREAMING_SNAKE_CASE:Dict = orig_model.state_dict() SCREAMING_SNAKE_CASE:Union[str, Any] = rename_orig_weights(snake_case ) SCREAMING_SNAKE_CASE:Dict = set(renamed_state_dict.keys() ) - set(diffusers_state_dict.keys() ) SCREAMING_SNAKE_CASE:Dict = set(diffusers_state_dict.keys() ) - set(renamed_state_dict.keys() ) assert len(snake_case ) == 0, F'''Problem with {renamed_minus_diffusers}''' assert all(k.endswith("kernel" ) for k in list(snake_case ) ), F'''Problem with {diffusers_minus_renamed}''' for key, value in renamed_state_dict.items(): assert ( diffusers_state_dict[key].squeeze().shape == value.squeeze().shape ), F'''Shape for {key} doesn\'t match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}''' if key == "time_proj.weight": SCREAMING_SNAKE_CASE:Dict = value.squeeze() SCREAMING_SNAKE_CASE:Union[str, Any] = value diffusers_model.load_state_dict(snake_case ) SCREAMING_SNAKE_CASE:int = 100 SCREAMING_SNAKE_CASE:int = 33 SCREAMING_SNAKE_CASE:Any = IPNDMScheduler(num_train_timesteps=snake_case ) SCREAMING_SNAKE_CASE:str = torch.manual_seed(snake_case ) SCREAMING_SNAKE_CASE:Union[str, Any] = torch.randn([1, 2, config.sample_size] , generator=snake_case ).to(snake_case ) SCREAMING_SNAKE_CASE:int = torch.linspace(1 , 0 , steps + 1 , device=snake_case )[:-1] SCREAMING_SNAKE_CASE:List[Any] = get_crash_schedule(snake_case ) SCREAMING_SNAKE_CASE:Union[str, Any] = DanceDiffusionPipeline(unet=snake_case , scheduler=snake_case ) SCREAMING_SNAKE_CASE:Union[str, Any] = torch.manual_seed(33 ) SCREAMING_SNAKE_CASE:Union[str, Any] = pipe(num_inference_steps=snake_case , generator=snake_case ).audios SCREAMING_SNAKE_CASE:Tuple = sampling.iplms_sample(snake_case , snake_case , snake_case , {} ) SCREAMING_SNAKE_CASE:Union[str, Any] = generated.clamp(-1 , 1 ) SCREAMING_SNAKE_CASE:Union[str, Any] = (generated - audio).abs().sum() SCREAMING_SNAKE_CASE:str = (generated - audio).abs().max() if args.save: pipe.save_pretrained(args.checkpoint_path ) print("Diff sum" , snake_case ) print("Diff max" , snake_case ) assert diff_max < 1e-3, F'''Diff max: {diff_max} is too much :-/''' print(F'''Conversion for {model_name} successful!''' ) if __name__ == "__main__": A_ = argparse.ArgumentParser() parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.") parser.add_argument( "--save", default=True, type=bool, required=False, help="Whether to save the converted model or not." ) parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.") A_ = parser.parse_args() main(args)
139
1
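The MobileBERT integration test above bounds the ratio `expected / output` instead of the absolute difference, because, as its inline comment notes, the activations span roughly 1e0 to 1e8 and an absolute tolerance would be dominated by the largest entries. The same check, pulled out into a standalone helper (the helper name and free-function form are ours, not the test's):

```python
import torch

TOLERANCE = 1e-3


def assert_close_by_ratio(
    expected: torch.Tensor, actual: torch.Tensor, tolerance: float = TOLERANCE
) -> None:
    # The element-wise ratio is ~1.0 wherever the tensors agree, regardless
    # of magnitude, so one tolerance works across eight orders of magnitude.
    ratio = expected / actual
    assert torch.all(ratio >= 1 - tolerance)
    assert torch.all(ratio <= 1 + tolerance)
```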
import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class SCREAMING_SNAKE_CASE__ : def __init__( self : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[Any]=13 , _lowerCAmelCase : Optional[Any]=7 , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Tuple=True , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Any=99 , _lowerCAmelCase : Union[str, Any]=16 , _lowerCAmelCase : Optional[Any]=36 , _lowerCAmelCase : Dict=6 , _lowerCAmelCase : Dict=6 , _lowerCAmelCase : Tuple=6 , _lowerCAmelCase : Optional[Any]=37 , _lowerCAmelCase : Optional[int]="gelu" , _lowerCAmelCase : int=0.1 , _lowerCAmelCase : Optional[int]=0.1 , _lowerCAmelCase : Dict=5_12 , _lowerCAmelCase : Tuple=16 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : Dict=0.02 , _lowerCAmelCase : Optional[int]=3 , _lowerCAmelCase : Union[str, Any]=4 , _lowerCAmelCase : str=None , ): __snake_case : Any = parent __snake_case : str = batch_size __snake_case : List[str] = seq_length __snake_case : str = is_training __snake_case : Optional[int] = use_input_mask __snake_case : Optional[Any] = use_token_type_ids __snake_case : Optional[Any] = use_labels __snake_case : str = vocab_size __snake_case : int = embedding_size __snake_case : Any = hidden_size __snake_case : List[Any] = num_hidden_layers __snake_case : Tuple = num_hidden_groups __snake_case : List[str] = num_attention_heads __snake_case : List[str] = intermediate_size __snake_case : Dict = hidden_act __snake_case : Optional[int] = hidden_dropout_prob __snake_case : int = attention_probs_dropout_prob __snake_case : Any = max_position_embeddings __snake_case : List[str] = type_vocab_size __snake_case : List[Any] = type_sequence_label_size __snake_case : List[Any] = initializer_range __snake_case : Tuple = num_labels __snake_case : Tuple = num_choices __snake_case : Dict = scope def lowerCAmelCase__ ( self : int ): __snake_case : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) __snake_case : List[str] = None if self.use_input_mask: __snake_case : List[str] = random_attention_mask([self.batch_size, self.seq_length] ) __snake_case : List[Any] = None if self.use_token_type_ids: __snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) __snake_case : List[Any] = None __snake_case : Optional[Any] = None __snake_case : Optional[Any] = None if self.use_labels: __snake_case : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size ) __snake_case : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) __snake_case : str = ids_tensor([self.batch_size] , self.num_choices ) __snake_case : List[Any] = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels 
def lowerCAmelCase__ ( self : Dict ): return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def lowerCAmelCase__ ( self : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Tuple , _lowerCAmelCase : Tuple , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] ): __snake_case : List[Any] = AlbertModel(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() __snake_case : Any = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) __snake_case : List[Any] = model(_lowerCAmelCase , token_type_ids=_lowerCAmelCase ) __snake_case : Tuple = model(_lowerCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowerCAmelCase__ ( self : Dict , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : List[Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : int ): __snake_case : Union[str, Any] = AlbertForPreTraining(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() __snake_case : Optional[int] = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , sentence_order_label=_lowerCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def lowerCAmelCase__ ( self : Union[str, Any] , _lowerCAmelCase : int , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : List[Any] ): __snake_case : Union[str, Any] = AlbertForMaskedLM(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() __snake_case : Union[str, Any] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowerCAmelCase__ ( self : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : str , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str ): __snake_case : Tuple = AlbertForQuestionAnswering(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() __snake_case : Dict = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowerCAmelCase__ ( self : Any , _lowerCAmelCase : List[str] , _lowerCAmelCase : int , _lowerCAmelCase : str , 
_lowerCAmelCase : str , _lowerCAmelCase : Dict , _lowerCAmelCase : int , _lowerCAmelCase : List[str] ): __snake_case : Dict = self.num_labels __snake_case : Optional[Any] = AlbertForSequenceClassification(_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() __snake_case : Any = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowerCAmelCase__ ( self : Optional[Any] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : str , _lowerCAmelCase : str ): __snake_case : List[str] = self.num_labels __snake_case : Dict = AlbertForTokenClassification(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() __snake_case : List[str] = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowerCAmelCase__ ( self : Any , _lowerCAmelCase : str , _lowerCAmelCase : Any , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : Any , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Tuple ): __snake_case : Optional[Any] = self.num_choices __snake_case : Union[str, Any] = AlbertForMultipleChoice(config=_lowerCAmelCase ) model.to(_lowerCAmelCase ) model.eval() __snake_case : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : Optional[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : int = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() __snake_case : List[Any] = model( _lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowerCAmelCase__ ( self : Optional[Any] ): __snake_case : int = self.prepare_config_and_inputs() ( __snake_case ) : Any = config_and_inputs __snake_case : Any = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class SCREAMING_SNAKE_CASE__ ( __UpperCamelCase , __UpperCamelCase , unittest.TestCase ): A : List[str] = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) A : Any = ( { "feature-extraction": AlbertModel, "fill-mask": AlbertForMaskedLM, "question-answering": AlbertForQuestionAnswering, "text-classification": AlbertForSequenceClassification, "token-classification": AlbertForTokenClassification, "zero-shot": AlbertForSequenceClassification, } if is_torch_available() else {} ) A : Tuple = True def lowerCAmelCase__ ( self : List[str] , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[int]=False ): __snake_case : Union[str, Any] = super()._prepare_for_class(_lowerCAmelCase , _lowerCAmelCase , return_labels=_lowerCAmelCase ) if return_labels: if model_class in get_values(_lowerCAmelCase ): __snake_case : Optional[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_lowerCAmelCase ) __snake_case : Any = torch.zeros( self.model_tester.batch_size 
, dtype=torch.long , device=_lowerCAmelCase ) return inputs_dict def lowerCAmelCase__ ( self : Optional[Any] ): __snake_case : Dict = AlbertModelTester(self ) __snake_case : Union[str, Any] = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=37 ) def lowerCAmelCase__ ( self : Optional[Any] ): self.config_tester.run_common_tests() def lowerCAmelCase__ ( self : Any ): __snake_case : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_lowerCAmelCase ) def lowerCAmelCase__ ( self : Any ): __snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_lowerCAmelCase ) def lowerCAmelCase__ ( self : Optional[Any] ): __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_lowerCAmelCase ) def lowerCAmelCase__ ( self : str ): __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_lowerCAmelCase ) def lowerCAmelCase__ ( self : Dict ): __snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase ) def lowerCAmelCase__ ( self : Union[str, Any] ): __snake_case : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*_lowerCAmelCase ) def lowerCAmelCase__ ( self : Union[str, Any] ): __snake_case : Tuple = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: __snake_case : List[str] = type self.model_tester.create_and_check_model(*_lowerCAmelCase ) @slow def lowerCAmelCase__ ( self : Tuple ): for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Optional[Any] = AlbertModel.from_pretrained(_lowerCAmelCase ) self.assertIsNotNone(_lowerCAmelCase ) @require_torch class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): @slow def lowerCAmelCase__ ( self : int ): __snake_case : str = AlbertModel.from_pretrained("""albert-base-v2""" ) __snake_case : Optional[Any] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] ) __snake_case : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): __snake_case : Any = model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0] __snake_case : Tuple = torch.Size((1, 11, 7_68) ) self.assertEqual(output.shape , _lowerCAmelCase ) __snake_case : List[str] = torch.tensor( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _lowerCAmelCase , atol=1e-4 ) )
363
import json import os import shutil import tempfile import unittest from multiprocessing import get_context from pathlib import Path import datasets import numpy as np from datasets import load_dataset from parameterized import parameterized from transformers import AutoProcessor from transformers.models.wavaveca import WavaVecaCTCTokenizer, WavaVecaFeatureExtractor from transformers.models.wavaveca.tokenization_wavaveca import VOCAB_FILES_NAMES from transformers.testing_utils import require_pyctcdecode, require_torch, require_torchaudio, slow from transformers.utils import FEATURE_EXTRACTOR_NAME, is_pyctcdecode_available, is_torch_available from ..wavaveca.test_feature_extraction_wavaveca import floats_list if is_pyctcdecode_available(): from huggingface_hub import snapshot_download from pyctcdecode import BeamSearchDecoderCTC from transformers.models.wavaveca_with_lm import WavaVecaProcessorWithLM from transformers.models.wavaveca_with_lm.processing_wavaveca_with_lm import WavaVecaDecoderWithLMOutput if is_torch_available(): from transformers import WavaVecaForCTC @require_pyctcdecode class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ): def snake_case__ ( self : Any ): __snake_case : Dict = """| <pad> <unk> <s> </s> a b c d e f g h i j k""".split() __snake_case : str = dict(zip(_lowerCAmelCase , range(len(_lowerCAmelCase ) ) ) ) __snake_case : List[str] = { """unk_token""": """<unk>""", """bos_token""": """<s>""", """eos_token""": """</s>""", } __snake_case : str = { """feature_size""": 1, """padding_value""": 0.0, """sampling_rate""": 1_60_00, """return_attention_mask""": False, """do_normalize""": True, } __snake_case : Optional[Any] = tempfile.mkdtemp() __snake_case : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] ) __snake_case : Any = os.path.join(self.tmpdirname , _lowerCAmelCase ) with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(_lowerCAmelCase ) + """\n""" ) with open(self.feature_extraction_file , """w""" , encoding="""utf-8""" ) as fp: fp.write(json.dumps(_lowerCAmelCase ) + """\n""" ) # load decoder from hub __snake_case : Optional[int] = """hf-internal-testing/ngram-beam-search-decoder""" def snake_case__ ( self : Optional[Any] , **_lowerCAmelCase : Tuple ): __snake_case : int = self.add_kwargs_tokens_map.copy() kwargs.update(_lowerCAmelCase ) return WavaVecaCTCTokenizer.from_pretrained(self.tmpdirname , **_lowerCAmelCase ) def snake_case__ ( self : Union[str, Any] , **_lowerCAmelCase : Optional[int] ): return WavaVecaFeatureExtractor.from_pretrained(self.tmpdirname , **_lowerCAmelCase ) def snake_case__ ( self : Dict , **_lowerCAmelCase : Tuple ): return BeamSearchDecoderCTC.load_from_hf_hub(self.decoder_name , **_lowerCAmelCase ) def snake_case__ ( self : List[str] ): shutil.rmtree(self.tmpdirname ) def snake_case__ ( self : Union[str, Any] ): __snake_case : Union[str, Any] = self.get_tokenizer() __snake_case : Tuple = self.get_feature_extractor() __snake_case : Dict = self.get_decoder() __snake_case : List[str] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase ) processor.save_pretrained(self.tmpdirname ) __snake_case : Dict = WavaVecaProcessorWithLM.from_pretrained(self.tmpdirname ) # tokenizer self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() ) self.assertIsInstance(processor.tokenizer , _lowerCAmelCase ) # feature extractor self.assertEqual(processor.feature_extractor.to_json_string() , 
feature_extractor.to_json_string() ) self.assertIsInstance(processor.feature_extractor , _lowerCAmelCase ) # decoder self.assertEqual(processor.decoder._alphabet.labels , decoder._alphabet.labels ) self.assertEqual( processor.decoder.model_container[decoder._model_key]._unigram_set , decoder.model_container[decoder._model_key]._unigram_set , ) self.assertIsInstance(processor.decoder , _lowerCAmelCase ) def snake_case__ ( self : Tuple ): __snake_case : Tuple = WavaVecaProcessorWithLM( tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) processor.save_pretrained(self.tmpdirname ) # make sure that error is thrown when decoder alphabet doesn't match __snake_case : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained( self.tmpdirname , alpha=5.0 , beta=3.0 , score_boundary=-7.0 , unk_score_offset=3 ) # decoder self.assertEqual(processor.language_model.alpha , 5.0 ) self.assertEqual(processor.language_model.beta , 3.0 ) self.assertEqual(processor.language_model.score_boundary , -7.0 ) self.assertEqual(processor.language_model.unk_score_offset , 3 ) def snake_case__ ( self : int ): __snake_case : Tuple = self.get_tokenizer() # add token to trigger raise tokenizer.add_tokens(["""xx"""] ) with self.assertRaisesRegex(_lowerCAmelCase , """include""" ): WavaVecaProcessorWithLM( tokenizer=_lowerCAmelCase , feature_extractor=self.get_feature_extractor() , decoder=self.get_decoder() ) def snake_case__ ( self : Dict ): __snake_case : int = self.get_feature_extractor() __snake_case : str = self.get_tokenizer() __snake_case : Dict = self.get_decoder() __snake_case : Any = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase ) __snake_case : List[Any] = floats_list((3, 10_00) ) __snake_case : Optional[Any] = feature_extractor(_lowerCAmelCase , return_tensors="""np""" ) __snake_case : Tuple = processor(_lowerCAmelCase , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def snake_case__ ( self : Optional[int] ): __snake_case : Any = self.get_feature_extractor() __snake_case : Union[str, Any] = self.get_tokenizer() __snake_case : int = self.get_decoder() __snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase ) __snake_case : Optional[int] = """This is a test string""" __snake_case : Union[str, Any] = processor(text=_lowerCAmelCase ) __snake_case : Dict = tokenizer(_lowerCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def snake_case__ ( self : List[str] , _lowerCAmelCase : List[Any]=(2, 10, 16) , _lowerCAmelCase : str=77 ): np.random.seed(_lowerCAmelCase ) return np.random.rand(*_lowerCAmelCase ) def snake_case__ ( self : Tuple ): __snake_case : List[str] = self.get_feature_extractor() __snake_case : List[str] = self.get_tokenizer() __snake_case : List[str] = self.get_decoder() __snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase ) __snake_case : Optional[int] = self._get_dummy_logits(shape=(10, 16) , seed=13 ) __snake_case : int = processor.decode(_lowerCAmelCase ) __snake_case : Optional[int] = decoder.decode_beams(_lowerCAmelCase )[0] self.assertEqual(decoded_decoder[0] , decoded_processor.text ) self.assertEqual("""</s> <s> </s>""" , 
decoded_processor.text ) self.assertEqual(decoded_decoder[-2] , decoded_processor.logit_score ) self.assertEqual(decoded_decoder[-1] , decoded_processor.lm_score ) @parameterized.expand([[None], ["""fork"""], ["""spawn"""]] ) def snake_case__ ( self : List[str] , _lowerCAmelCase : List[str] ): __snake_case : int = self.get_feature_extractor() __snake_case : Union[str, Any] = self.get_tokenizer() __snake_case : int = self.get_decoder() __snake_case : Dict = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase ) __snake_case : int = self._get_dummy_logits() # note: pool should be instantiated *after* Wav2Vec2ProcessorWithLM. # otherwise, the LM won't be available to the pool's sub-processes. # manual logic used to allow parameterized test for both pool=None and pool=Pool(...) if pool_context is None: __snake_case : Tuple = processor.batch_decode(_lowerCAmelCase ) else: with get_context(_lowerCAmelCase ).Pool() as pool: __snake_case : int = processor.batch_decode(_lowerCAmelCase , _lowerCAmelCase ) __snake_case : int = list(_lowerCAmelCase ) with get_context("""fork""" ).Pool() as p: __snake_case : Tuple = decoder.decode_beams_batch(_lowerCAmelCase , _lowerCAmelCase ) __snake_case , __snake_case , __snake_case : List[Any] = [], [], [] for beams in decoded_beams: texts_decoder.append(beams[0][0] ) logit_scores_decoder.append(beams[0][-2] ) lm_scores_decoder.append(beams[0][-1] ) self.assertListEqual(_lowerCAmelCase , decoded_processor.text ) self.assertListEqual(["""<s> <s> </s>""", """<s> <s> <s>"""] , decoded_processor.text ) self.assertListEqual(_lowerCAmelCase , decoded_processor.logit_score ) self.assertListEqual(_lowerCAmelCase , decoded_processor.lm_score ) def snake_case__ ( self : Optional[int] ): __snake_case : Optional[Any] = self.get_feature_extractor() __snake_case : int = self.get_tokenizer() __snake_case : str = self.get_decoder() __snake_case : Optional[Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase ) __snake_case : int = self._get_dummy_logits() __snake_case : List[str] = 15 __snake_case : Optional[Any] = -20.0 __snake_case : Tuple = -4.0 __snake_case : List[Any] = processor.batch_decode( _lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , ) __snake_case : List[str] = decoded_processor_out.text __snake_case : str = list(_lowerCAmelCase ) with get_context("""fork""" ).Pool() as pool: __snake_case : Dict = decoder.decode_beams_batch( _lowerCAmelCase , _lowerCAmelCase , beam_width=_lowerCAmelCase , beam_prune_logp=_lowerCAmelCase , token_min_logp=_lowerCAmelCase , ) __snake_case : int = [d[0][0] for d in decoded_decoder_out] __snake_case : List[Any] = [d[0][2] for d in decoded_decoder_out] __snake_case : List[Any] = [d[0][3] for d in decoded_decoder_out] self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertListEqual(["""</s> <s> <s>""", """<s> <s> <s>"""] , _lowerCAmelCase ) self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.logit_score ) ) self.assertTrue(np.allclose([-20.054, -18.447] , _lowerCAmelCase , atol=1e-3 ) ) self.assertTrue(np.array_equal(_lowerCAmelCase , decoded_processor_out.lm_score ) ) self.assertTrue(np.allclose([-15.554, -13.9474] , _lowerCAmelCase , atol=1e-3 ) ) def snake_case__ ( self : Any ): __snake_case : List[Any] = self.get_feature_extractor() __snake_case : Any = self.get_tokenizer() __snake_case : Union[str, Any] = 
self.get_decoder() __snake_case : Dict = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase ) __snake_case : Any = self._get_dummy_logits() __snake_case : Any = 2.0 __snake_case : int = 5.0 __snake_case : Optional[int] = -20.0 __snake_case : Optional[int] = True __snake_case : Any = processor.batch_decode( _lowerCAmelCase , alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , ) __snake_case : str = decoded_processor_out.text __snake_case : int = list(_lowerCAmelCase ) decoder.reset_params( alpha=_lowerCAmelCase , beta=_lowerCAmelCase , unk_score_offset=_lowerCAmelCase , lm_score_boundary=_lowerCAmelCase , ) with get_context("""fork""" ).Pool() as pool: __snake_case : Tuple = decoder.decode_beams_batch( _lowerCAmelCase , _lowerCAmelCase , ) __snake_case : int = [d[0][0] for d in decoded_decoder_out] self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) self.assertListEqual(["""<s> </s> <s> </s> </s>""", """</s> </s> <s> </s> </s>"""] , _lowerCAmelCase ) __snake_case : List[str] = processor.decoder.model_container[processor.decoder._model_key] self.assertEqual(lm_model.alpha , 2.0 ) self.assertEqual(lm_model.beta , 5.0 ) self.assertEqual(lm_model.unk_score_offset , -20.0 ) self.assertEqual(lm_model.score_boundary , _lowerCAmelCase ) def snake_case__ ( self : Dict ): __snake_case : Optional[Any] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) __snake_case : Union[str, Any] = processor.decoder.model_container[processor.decoder._model_key] __snake_case : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute() __snake_case : Union[str, Any] = os.listdir(_lowerCAmelCase ) __snake_case : List[str] = ["""alphabet.json""", """language_model"""] downloaded_decoder_files.sort() expected_decoder_files.sort() # test that only decoder relevant files from # https://huggingface.co/hf-internal-testing/processor_with_lm/tree/main # are downloaded and none of the rest (e.g. README.md, ...) 
self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) def snake_case__ ( self : Optional[Any] ): __snake_case : Union[str, Any] = snapshot_download("""hf-internal-testing/processor_with_lm""" ) __snake_case : Dict = WavaVecaProcessorWithLM.from_pretrained(_lowerCAmelCase ) __snake_case : Optional[Any] = processor.decoder.model_container[processor.decoder._model_key] __snake_case : Optional[int] = Path(language_model._kenlm_model.path.decode("""utf-8""" ) ).parent.parent.absolute() __snake_case : List[str] = os.listdir(_lowerCAmelCase ) __snake_case : List[Any] = os.listdir(_lowerCAmelCase ) local_decoder_files.sort() expected_decoder_files.sort() # test that both decoder form hub and local files in cache are the same self.assertListEqual(_lowerCAmelCase , _lowerCAmelCase ) def snake_case__ ( self : Optional[Any] ): __snake_case : Optional[int] = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) __snake_case : str = AutoProcessor.from_pretrained("""hf-internal-testing/processor_with_lm""" ) __snake_case : Optional[int] = floats_list((3, 10_00) ) __snake_case : Union[str, Any] = processor_wavaveca(_lowerCAmelCase , return_tensors="""np""" ) __snake_case : Union[str, Any] = processor_auto(_lowerCAmelCase , return_tensors="""np""" ) for key in input_wavaveca.keys(): self.assertAlmostEqual(input_wavaveca[key].sum() , input_auto[key].sum() , delta=1e-2 ) __snake_case : Dict = self._get_dummy_logits() __snake_case : List[Any] = processor_wavaveca.batch_decode(_lowerCAmelCase ) __snake_case : List[Any] = processor_auto.batch_decode(_lowerCAmelCase ) self.assertListEqual(decoded_wavaveca.text , decoded_auto.text ) def snake_case__ ( self : str ): __snake_case : int = self.get_feature_extractor() __snake_case : List[str] = self.get_tokenizer() __snake_case : Optional[Any] = self.get_decoder() __snake_case : Union[str, Any] = WavaVecaProcessorWithLM(tokenizer=_lowerCAmelCase , feature_extractor=_lowerCAmelCase , decoder=_lowerCAmelCase ) self.assertListEqual( processor.model_input_names , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , ) @staticmethod def snake_case__ ( _lowerCAmelCase : Any , _lowerCAmelCase : Tuple ): __snake_case : Union[str, Any] = [d[key] for d in offsets] return retrieved_list def snake_case__ ( self : Dict ): __snake_case : int = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) __snake_case : List[str] = self._get_dummy_logits()[0] __snake_case : str = processor.decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue("""text""" in outputs ) self.assertTrue("""word_offsets""" in outputs ) self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) ) self.assertEqual(""" """.join(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) ) , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """start_offset""" ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""] , """end_offset""" ) , [1, 3, 5] ) def snake_case__ ( self : List[str] ): __snake_case : Any = WavaVecaProcessorWithLM.from_pretrained("""hf-internal-testing/processor_with_lm""" ) __snake_case : Optional[int] = self._get_dummy_logits() __snake_case : int = 
processor.batch_decode(_lowerCAmelCase , output_word_offsets=_lowerCAmelCase ) # check Wav2Vec2CTCTokenizerOutput keys for word self.assertEqual(len(outputs.keys() ) , 4 ) self.assertTrue("""text""" in outputs ) self.assertTrue("""word_offsets""" in outputs ) self.assertTrue(isinstance(_lowerCAmelCase , _lowerCAmelCase ) ) self.assertListEqual( [""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) for o in outputs["""word_offsets"""]] , outputs.text ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """word""" ) , ["""<s>""", """<s>""", """</s>"""] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """start_offset""" ) , [0, 2, 4] ) self.assertListEqual(self.get_from_offsets(outputs["""word_offsets"""][0] , """end_offset""" ) , [1, 3, 5] ) @slow @require_torch @require_torchaudio def snake_case__ ( self : Optional[Any] ): import torch __snake_case : Optional[Any] = load_dataset("""common_voice""" , """en""" , split="""train""" , streaming=_lowerCAmelCase ) __snake_case : Any = ds.cast_column("""audio""" , datasets.Audio(sampling_rate=1_60_00 ) ) __snake_case : List[Any] = iter(_lowerCAmelCase ) __snake_case : Optional[int] = next(_lowerCAmelCase ) __snake_case : str = AutoProcessor.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" ) __snake_case : str = WavaVecaForCTC.from_pretrained("""patrickvonplaten/wav2vec2-base-100h-with-lm""" ) # compare to filename `common_voice_en_100038.mp3` of dataset viewer on https://huggingface.co/datasets/common_voice/viewer/en/train __snake_case : List[str] = processor(sample["""audio"""]["""array"""] , return_tensors="""pt""" ).input_values with torch.no_grad(): __snake_case : Dict = model(_lowerCAmelCase ).logits.cpu().numpy() __snake_case : Any = processor.decode(logits[0] , output_word_offsets=_lowerCAmelCase ) __snake_case : Optional[Any] = model.config.inputs_to_logits_ratio / processor.feature_extractor.sampling_rate __snake_case : Dict = [ { """start_time""": d["""start_offset"""] * time_offset, """end_time""": d["""end_offset"""] * time_offset, """word""": d["""word"""], } for d in output["""word_offsets"""] ] __snake_case : Dict = """WHY DOES MILISANDRA LOOK LIKE SHE WANTS TO CONSUME JOHN SNOW ON THE RIVER AT THE WALL""" # output words self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , _lowerCAmelCase ) self.assertEqual(""" """.join(self.get_from_offsets(_lowerCAmelCase , """word""" ) ) , output.text ) # output times __snake_case : Dict = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """start_time""" ) ) __snake_case : Optional[Any] = torch.tensor(self.get_from_offsets(_lowerCAmelCase , """end_time""" ) ) # fmt: off __snake_case : Optional[Any] = torch.tensor([1.4199, 1.6599, 2.2599, 3.0, 3.24, 3.5999, 3.7999, 4.0999, 4.26, 4.94, 5.28, 5.6599, 5.78, 5.94, 6.32, 6.5399, 6.6599] ) __snake_case : Optional[int] = torch.tensor([1.5399, 1.8999, 2.9, 3.16, 3.5399, 3.72, 4.0199, 4.1799, 4.76, 5.1599, 5.5599, 5.6999, 5.86, 6.1999, 6.38, 6.6199, 6.94] ) # fmt: on self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) ) self.assertTrue(torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=0.01 ) )
20
0
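The slow Wav2Vec2-with-LM test above converts CTC `word_offsets` into wall-clock timestamps by scaling each frame offset with `inputs_to_logits_ratio / sampling_rate`. A standalone restatement of that conversion (the wrapper function is our own; the `word`/`start_offset`/`end_offset` keys are the ones the test reads):

```python
def offsets_to_times(
    word_offsets: list[dict], inputs_to_logits_ratio: float, sampling_rate: int
) -> list[dict]:
    # One decoded logit frame corresponds to `inputs_to_logits_ratio` input
    # samples, so its duration in seconds is that ratio / sampling_rate.
    time_offset = inputs_to_logits_ratio / sampling_rate
    return [
        {
            "word": d["word"],
            "start_time": d["start_offset"] * time_offset,
            "end_time": d["end_offset"] * time_offset,
        }
        for d in word_offsets
    ]
```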
"""simple docstring""" import gc import random import tempfile import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline from diffusers.utils import floats_tensor, nightly, torch_device from diffusers.utils.testing_utils import require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase): def _UpperCamelCase ( self : Dict ) -> int: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() @property def _UpperCamelCase ( self : str ) -> List[str]: _UpperCamelCase = 1 _UpperCamelCase = 3 _UpperCamelCase = (32, 32) _UpperCamelCase = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__UpperCamelCase ) return image @property def _UpperCamelCase ( self : List[Any] ) -> Dict: torch.manual_seed(0 ) _UpperCamelCase = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , ) return model @property def _UpperCamelCase ( self : str ) -> Union[str, Any]: torch.manual_seed(0 ) _UpperCamelCase = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , ) return model @property def _UpperCamelCase ( self : Optional[int] ) -> Any: torch.manual_seed(0 ) _UpperCamelCase = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) return CLIPTextModel(__UpperCamelCase ) @property def _UpperCamelCase ( self : str ) -> Dict: def extract(*__UpperCamelCase : str , **__UpperCamelCase : Dict ): class UpperCAmelCase_ : def __init__( self : List[str] ) -> Optional[Any]: _UpperCamelCase = torch.ones([0] ) def _UpperCamelCase ( self : List[Any] , __UpperCamelCase : Tuple ) -> int: self.pixel_values.to(__UpperCamelCase ) return self return Out() return extract def _UpperCamelCase ( self : Optional[int] ) -> str: _UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator _UpperCamelCase = self.dummy_cond_unet _UpperCamelCase = DDIMScheduler( beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='''scaled_linear''' , clip_sample=__UpperCamelCase , set_alpha_to_one=__UpperCamelCase , ) _UpperCamelCase = self.dummy_vae _UpperCamelCase = self.dummy_text_encoder _UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # make sure here that pndm scheduler skips prk _UpperCamelCase = StableDiffusionPipeline( unet=__UpperCamelCase , scheduler=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=self.dummy_extractor , ) _UpperCamelCase = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCamelCase = """A painting of a squirrel eating a burger""" _UpperCamelCase = torch.Generator(device=__UpperCamelCase ).manual_seed(0 ) _UpperCamelCase = sd_pipe([prompt] , 
generator=__UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' ) _UpperCamelCase = output.images _UpperCamelCase = torch.Generator(device=__UpperCamelCase ).manual_seed(0 ) _UpperCamelCase = sd_pipe( [prompt] , generator=__UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=__UpperCamelCase , )[0] _UpperCamelCase = image[0, -3:, -3:, -1] _UpperCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _UpperCamelCase = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def _UpperCamelCase ( self : Any ) -> Dict: _UpperCamelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator _UpperCamelCase = self.dummy_cond_unet _UpperCamelCase = PNDMScheduler(skip_prk_steps=__UpperCamelCase ) _UpperCamelCase = self.dummy_vae _UpperCamelCase = self.dummy_text_encoder _UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # make sure here that pndm scheduler skips prk _UpperCamelCase = StableDiffusionPipeline( unet=__UpperCamelCase , scheduler=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=self.dummy_extractor , ) _UpperCamelCase = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCamelCase = """A painting of a squirrel eating a burger""" _UpperCamelCase = torch.Generator(device=__UpperCamelCase ).manual_seed(0 ) _UpperCamelCase = sd_pipe([prompt] , generator=__UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' ) _UpperCamelCase = output.images _UpperCamelCase = torch.Generator(device=__UpperCamelCase ).manual_seed(0 ) _UpperCamelCase = sd_pipe( [prompt] , generator=__UpperCamelCase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , return_dict=__UpperCamelCase , )[0] _UpperCamelCase = image[0, -3:, -3:, -1] _UpperCamelCase = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _UpperCamelCase = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2 def _UpperCamelCase ( self : int ) -> Optional[int]: _UpperCamelCase = StableDiffusionPipeline.from_pretrained( '''hf-internal-testing/tiny-stable-diffusion-lms-pipe''' , safety_checker=__UpperCamelCase ) assert isinstance(__UpperCamelCase , __UpperCamelCase ) assert isinstance(pipe.scheduler , __UpperCamelCase ) assert pipe.safety_checker is None _UpperCamelCase = pipe('''example prompt''' , num_inference_steps=2 ).images[0] assert image is not None # check that there's no error when saving a pipeline with one of the models being None with tempfile.TemporaryDirectory() as tmpdirname: pipe.save_pretrained(__UpperCamelCase ) _UpperCamelCase = StableDiffusionPipeline.from_pretrained(__UpperCamelCase ) # sanity check that the pipeline still works assert pipe.safety_checker is None _UpperCamelCase = pipe('''example prompt''' , num_inference_steps=2 ).images[0] assert image is not None @unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' ) def _UpperCamelCase ( 
self : List[str] ) -> List[Any]: _UpperCamelCase = self.dummy_cond_unet _UpperCamelCase = PNDMScheduler(skip_prk_steps=__UpperCamelCase ) _UpperCamelCase = self.dummy_vae _UpperCamelCase = self.dummy_text_encoder _UpperCamelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' ) # put models in fp16 _UpperCamelCase = unet.half() _UpperCamelCase = vae.half() _UpperCamelCase = bert.half() # make sure here that pndm scheduler skips prk _UpperCamelCase = StableDiffusionPipeline( unet=__UpperCamelCase , scheduler=__UpperCamelCase , vae=__UpperCamelCase , text_encoder=__UpperCamelCase , tokenizer=__UpperCamelCase , safety_checker=__UpperCamelCase , feature_extractor=self.dummy_extractor , ) _UpperCamelCase = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCamelCase = """A painting of a squirrel eating a burger""" _UpperCamelCase = sd_pipe([prompt] , num_inference_steps=2 , output_type='''np''' ).images assert image.shape == (1, 64, 64, 3) @nightly @require_torch_gpu class UpperCAmelCase_ ( unittest.TestCase): def _UpperCamelCase ( self : Optional[Any] ) -> Union[str, Any]: # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def _UpperCamelCase ( self : List[Any] ) -> int: _UpperCamelCase = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=__UpperCamelCase ) _UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) _UpperCamelCase = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCamelCase = ( """portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle""" """ coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with""" """ anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and""" """ children from bahnhof zoo, detailed """ ) _UpperCamelCase = 40_0366_0346 _UpperCamelCase = 7 # without safety guidance (sld_guidance_scale = 0) _UpperCamelCase = torch.manual_seed(__UpperCamelCase ) _UpperCamelCase = sd_pipe( [prompt] , generator=__UpperCamelCase , guidance_scale=__UpperCamelCase , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , ) _UpperCamelCase = output.images _UpperCamelCase = image[0, -3:, -3:, -1] _UpperCamelCase = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 # without safety guidance (strong configuration) _UpperCamelCase = torch.manual_seed(__UpperCamelCase ) _UpperCamelCase = sd_pipe( [prompt] , generator=__UpperCamelCase , guidance_scale=__UpperCamelCase , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) _UpperCamelCase = output.images _UpperCamelCase = image[0, -3:, -3:, -1] _UpperCamelCase = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _UpperCamelCase ( self : Optional[Any] ) -> List[Any]: _UpperCamelCase = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' , safety_checker=__UpperCamelCase ) 
_UpperCamelCase = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config ) _UpperCamelCase = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCamelCase = """padme amidala taking a bath artwork, safe for work, no nudity""" _UpperCamelCase = 27_3497_1755 _UpperCamelCase = 7 _UpperCamelCase = torch.manual_seed(__UpperCamelCase ) _UpperCamelCase = sd_pipe( [prompt] , generator=__UpperCamelCase , guidance_scale=__UpperCamelCase , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , ) _UpperCamelCase = output.images _UpperCamelCase = image[0, -3:, -3:, -1] _UpperCamelCase = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 _UpperCamelCase = torch.manual_seed(__UpperCamelCase ) _UpperCamelCase = sd_pipe( [prompt] , generator=__UpperCamelCase , guidance_scale=__UpperCamelCase , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) _UpperCamelCase = output.images _UpperCamelCase = image[0, -3:, -3:, -1] _UpperCamelCase = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3] assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]: _UpperCamelCase = StableDiffusionPipeline.from_pretrained('''runwayml/stable-diffusion-v1-5''' ) _UpperCamelCase = sd_pipe.to(__UpperCamelCase ) sd_pipe.set_progress_bar_config(disable=__UpperCamelCase ) _UpperCamelCase = ( """the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.""" """ leyendecker""" ) _UpperCamelCase = 10_4435_5234 _UpperCamelCase = 12 _UpperCamelCase = torch.manual_seed(__UpperCamelCase ) _UpperCamelCase = sd_pipe( [prompt] , generator=__UpperCamelCase , guidance_scale=__UpperCamelCase , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=0 , ) _UpperCamelCase = output.images _UpperCamelCase = image[0, -3:, -3:, -1] _UpperCamelCase = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7 _UpperCamelCase = torch.manual_seed(__UpperCamelCase ) _UpperCamelCase = sd_pipe( [prompt] , generator=__UpperCamelCase , guidance_scale=__UpperCamelCase , num_inference_steps=50 , output_type='''np''' , width=512 , height=512 , sld_guidance_scale=2000 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , ) _UpperCamelCase = output.images _UpperCamelCase = image[0, -3:, -3:, -1] _UpperCamelCase = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] ) assert image.shape == (1, 512, 512, 3) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
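# --- Hedged usage sketch (not part of the original test file) ---
# A minimal example of running the safe Stable Diffusion pipeline with the
# same Safe Latent Diffusion (sld_*) knobs exercised in the nightly tests
# above. The checkpoint id, seed, and kwarg values are copied from those
# tests; treat them as illustrative, not as recommended settings.
import torch

from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe

pipe = StableDiffusionPipelineSafe.from_pretrained("runwayml/stable-diffusion-v1-5")
pipe = pipe.to("cuda")  # assumes a CUDA device is available

generator = torch.manual_seed(2734971755)  # seed copied from the test above
image = pipe(
    ["padme amidala taking a bath artwork, safe for work, no nudity"],
    generator=generator,
    guidance_scale=7,
    num_inference_steps=50,
    width=512,
    height=512,
    sld_guidance_scale=2000,  # 0 disables safety guidance entirely
    sld_warmup_steps=7,
    sld_threshold=0.025,
    sld_momentum_scale=0.5,
    sld_mom_beta=0.7,
).images[0]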
256
'''simple docstring''' import unittest import torch from torch import nn from accelerate.test_utils import require_cuda from accelerate.utils.memory import find_executable_batch_size, release_memory def __lowerCamelCase ( ) -> Any: raise RuntimeError("""CUDA out of memory.""" ) class UpperCamelCase_ ( nn.Module ): def __init__( self ) -> Any: super().__init__() UpperCAmelCase : Tuple = nn.Linear(3 , 4 ) UpperCAmelCase : Tuple = nn.BatchNormad(4 ) UpperCAmelCase : int = nn.Linear(4 , 5 ) def _lowercase( self , A ) -> Any: return self.lineara(self.batchnorm(self.lineara(A ) ) ) class UpperCamelCase_ ( unittest.TestCase ): def _lowercase( self ) -> Optional[Any]: UpperCAmelCase : Optional[int] = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(A ): nonlocal batch_sizes batch_sizes.append(A ) if batch_size != 8: raise_fake_out_of_memory() mock_training_loop_function() self.assertListEqual(A , [128, 64, 32, 16, 8] ) def _lowercase( self ) -> Any: UpperCAmelCase : Optional[Any] = [] @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(A , A ): nonlocal batch_sizes batch_sizes.append(A ) if batch_size != 8: raise_fake_out_of_memory() return batch_size, arga UpperCAmelCase , UpperCAmelCase : Optional[int] = mock_training_loop_function("""hello""" ) self.assertListEqual(A , [128, 64, 32, 16, 8] ) self.assertListEqual([bs, arga] , [8, """hello"""] ) def _lowercase( self ) -> Any: @find_executable_batch_size(starting_batch_size=0 ) def mock_training_loop_function(A ): pass with self.assertRaises(A ) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] ) def _lowercase( self ) -> Optional[int]: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(A ): if batch_size > 0: raise_fake_out_of_memory() pass with self.assertRaises(A ) as cm: mock_training_loop_function() self.assertIn("""No executable batch size found, reached zero.""" , cm.exception.args[0] ) def _lowercase( self ) -> Optional[Any]: @find_executable_batch_size(starting_batch_size=128 ) def mock_training_loop_function(A , A , A ): if batch_size != 8: raise raise_fake_out_of_memory() with self.assertRaises(A ) as cm: mock_training_loop_function(128 , """hello""" , """world""" ) self.assertIn("""Batch size was passed into `f`""" , cm.exception.args[0] ) self.assertIn("""`f(arg1='hello', arg2='world')""" , cm.exception.args[0] ) def _lowercase( self ) -> int: @find_executable_batch_size(starting_batch_size=16 ) def mock_training_loop_function(A ): raise ValueError("""Oops, we had an error!""" ) with self.assertRaises(A ) as cm: mock_training_loop_function() self.assertIn("""Oops, we had an error!""" , cm.exception.args[0] ) @require_cuda def _lowercase( self ) -> Optional[int]: UpperCAmelCase : Optional[Any] = torch.cuda.memory_allocated() UpperCAmelCase : List[str] = ModelForTest() model.cuda() self.assertGreater(torch.cuda.memory_allocated() , A ) UpperCAmelCase : Tuple = release_memory(A ) self.assertEqual(torch.cuda.memory_allocated() , A )
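# --- Hedged usage sketch (not part of the original test file) ---
# How `find_executable_batch_size` is typically used in a real training
# script: the decorated function is retried with a halved batch size each
# time it raises a CUDA OOM-style error, exactly as the tests above simulate.
from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    # `batch_size` is injected by the decorator; do NOT pass it yourself.
    print(f"trying batch_size={batch_size}")
    if batch_size > 16:  # stand-in for an actual CUDA OOM during training
        raise RuntimeError("CUDA out of memory.")
    return batch_size


train()  # retries 128 -> 64 -> 32 -> 16 and succeeds at 16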
265
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCamelCase__ = {'''configuration_yolos''': ['''YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''YolosConfig''', '''YolosOnnxConfig''']} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = ['''YolosFeatureExtractor'''] lowerCamelCase__ = ['''YolosImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase__ = [ '''YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''YolosForObjectDetection''', '''YolosModel''', '''YolosPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys lowerCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
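# --- Hedged usage sketch (not part of the original module) ---
# Effect of the `_LazyModule` pattern above: the heavy torch/vision
# submodules are only imported when one of their attributes is first
# accessed, so the top-level `transformers` import stays cheap.
from transformers import YolosConfig  # first access triggers the lazy import

config = YolosConfig()  # default ViT-style detection config
print(config.model_type)  # "yolos"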
22
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCamelCase__ = logging.get_logger(__name__) lowerCamelCase__ = { '''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''', '''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''', '''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''', '''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''', # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2 } class __magic_name__ (__lowercase ): lowerCamelCase__ = '''mobilenet_v2''' def __init__( self , _a=3 , _a=224 , _a=1.0 , _a=8 , _a=8 , _a=6 , _a=32 , _a=True , _a=True , _a="relu6" , _a=True , _a=0.8 , _a=0.0_2 , _a=0.0_0_1 , _a=255 , **_a , ) -> Dict: super().__init__(**_a ) if depth_multiplier <= 0: raise ValueError("depth_multiplier must be greater than zero." ) lowerCAmelCase_ = num_channels lowerCAmelCase_ = image_size lowerCAmelCase_ = depth_multiplier lowerCAmelCase_ = depth_divisible_by lowerCAmelCase_ = min_depth lowerCAmelCase_ = expand_ratio lowerCAmelCase_ = output_stride lowerCAmelCase_ = first_layer_is_expansion lowerCAmelCase_ = finegrained_output lowerCAmelCase_ = hidden_act lowerCAmelCase_ = tf_padding lowerCAmelCase_ = classifier_dropout_prob lowerCAmelCase_ = initializer_range lowerCAmelCase_ = layer_norm_eps lowerCAmelCase_ = semantic_loss_ignore_index class __magic_name__ (__lowercase ): lowerCamelCase__ = version.parse('''1.11''' ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: return OrderedDict([("pixel_values", {0: "batch"})] ) @property def __a ( self ) -> Mapping[str, Mapping[int, str]]: if self.task == "image-classification": return OrderedDict([("logits", {0: "batch"})] ) else: return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] ) @property def __a ( self ) -> float: return 1E-4
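# --- Hedged usage sketch (not part of the original module) ---
# Instantiating the config defined above (named `MobileNetV2Config` in the
# released transformers package; the class name here is obfuscated) and
# exercising the validation done in __init__.
from transformers import MobileNetV2Config

config = MobileNetV2Config(depth_multiplier=1.4, image_size=224)
print(config.depth_multiplier, config.output_stride)  # 1.4 8

# depth_multiplier must be positive; this mirrors the ValueError above:
try:
    MobileNetV2Config(depth_multiplier=0)
except ValueError as err:
    print(err)  # depth_multiplier must be greater than zero.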
22
1
import unittest from transformers import SqueezeBertConfig, is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, ) class snake_case__ (_UpperCamelCase ): """simple docstring""" def __init__( self : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int]=13 , __lowerCamelCase : List[str]=7 , __lowerCamelCase : Union[str, Any]=True , __lowerCamelCase : Dict=True , __lowerCamelCase : Optional[int]=False , __lowerCamelCase : str=True , __lowerCamelCase : List[str]=99 , __lowerCamelCase : List[Any]=32 , __lowerCamelCase : Any=5 , __lowerCamelCase : str=4 , __lowerCamelCase : List[Any]=64 , __lowerCamelCase : Dict="gelu" , __lowerCamelCase : int=0.1 , __lowerCamelCase : List[str]=0.1 , __lowerCamelCase : Union[str, Any]=5_12 , __lowerCamelCase : List[Any]=16 , __lowerCamelCase : str=2 , __lowerCamelCase : Optional[Any]=0.02 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : List[Any]=4 , __lowerCamelCase : Tuple=None , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : List[Any]=2 , __lowerCamelCase : Optional[Any]=2 , __lowerCamelCase : str=2 , __lowerCamelCase : Dict=4 , __lowerCamelCase : Optional[int]=1 , ) -> Any: a = parent a = batch_size a = seq_length a = is_training a = use_input_mask a = use_token_type_ids a = use_labels a = vocab_size a = hidden_size a = num_hidden_layers a = num_attention_heads a = intermediate_size a = hidden_act a = hidden_dropout_prob a = attention_probs_dropout_prob a = max_position_embeddings a = type_vocab_size a = type_sequence_label_size a = initializer_range a = num_labels a = num_choices a = scope a = q_groups a = k_groups a = v_groups a = post_attention_groups a = intermediate_groups a = output_groups def __UpperCAmelCase ( self : int ) -> Optional[int]: a = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) a = None if self.use_input_mask: a = random_attention_mask([self.batch_size, self.seq_length] ) a = None a = None a = None if self.use_labels: a = ids_tensor([self.batch_size] , self.type_sequence_label_size ) a = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) a = ids_tensor([self.batch_size] , self.num_choices ) a = self.get_config() return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels def __UpperCAmelCase ( self : Dict ) -> Dict: return SqueezeBertConfig( embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , 
output_groups=self.output_groups , ) def __UpperCAmelCase ( self : str , __lowerCamelCase : Optional[int] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any] ) -> str: a = SqueezeBertModel(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() a = model(__lowerCamelCase , __lowerCamelCase ) a = model(__lowerCamelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Any , __lowerCamelCase : int , __lowerCamelCase : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : List[Any] ) -> Dict: a = SqueezeBertForMaskedLM(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() a = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[int] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Dict , __lowerCamelCase : Optional[int] , __lowerCamelCase : str ) -> Union[str, Any]: a = SqueezeBertForQuestionAnswering(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() a = model( __lowerCamelCase , attention_mask=__lowerCamelCase , start_positions=__lowerCamelCase , end_positions=__lowerCamelCase ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __UpperCAmelCase ( self : List[str] , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Tuple , __lowerCamelCase : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Any , __lowerCamelCase : str ) -> Any: a = self.num_labels a = SqueezeBertForSequenceClassification(__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() a = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __UpperCAmelCase ( self : List[Any] , __lowerCamelCase : Tuple , __lowerCamelCase : int , __lowerCamelCase : str , __lowerCamelCase : str , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Tuple ) -> Any: a = self.num_labels a = SqueezeBertForTokenClassification(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() a = model(__lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : str , __lowerCamelCase : int , __lowerCamelCase : Tuple , __lowerCamelCase : List[str] , __lowerCamelCase : Dict , __lowerCamelCase : Tuple ) -> Optional[int]: a = self.num_choices a = SqueezeBertForMultipleChoice(config=__lowerCamelCase ) model.to(__lowerCamelCase ) model.eval() a = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() a = model( __lowerCamelCase , attention_mask=__lowerCamelCase , labels=__lowerCamelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __UpperCAmelCase ( self : Any ) -> Optional[int]: a = 
self.prepare_config_and_inputs() ((a) , (a) , (a) , (a) , (a) , (a)) = config_and_inputs a = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class snake_case__ (_UpperCamelCase , _UpperCamelCase , unittest.TestCase ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = ( ( SqueezeBertModel, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, ) if is_torch_available() else None ) SCREAMING_SNAKE_CASE_ : Tuple = ( { """feature-extraction""": SqueezeBertModel, """fill-mask""": SqueezeBertForMaskedLM, """question-answering""": SqueezeBertForQuestionAnswering, """text-classification""": SqueezeBertForSequenceClassification, """token-classification""": SqueezeBertForTokenClassification, """zero-shot""": SqueezeBertForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ : Optional[int] = False SCREAMING_SNAKE_CASE_ : str = True SCREAMING_SNAKE_CASE_ : Optional[int] = False def __UpperCAmelCase ( self : Tuple ) -> Any: a = SqueezeBertModelTester(self ) a = ConfigTester(self , config_class=__lowerCamelCase , dim=37 ) def __UpperCAmelCase ( self : int ) -> int: self.config_tester.run_common_tests() def __UpperCAmelCase ( self : Tuple ) -> Dict: a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_model(*__lowerCamelCase ) def __UpperCAmelCase ( self : Optional[int] ) -> Dict: a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_masked_lm(*__lowerCamelCase ) def __UpperCAmelCase ( self : Optional[Any] ) -> Optional[Any]: a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_question_answering(*__lowerCamelCase ) def __UpperCAmelCase ( self : Dict ) -> Any: a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_sequence_classification(*__lowerCamelCase ) def __UpperCAmelCase ( self : Union[str, Any] ) -> str: a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_token_classification(*__lowerCamelCase ) def __UpperCAmelCase ( self : Optional[Any] ) -> List[Any]: a = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_squeezebert_for_multiple_choice(*__lowerCamelCase ) @slow def __UpperCAmelCase ( self : Any ) -> Any: for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: a = SqueezeBertModel.from_pretrained(__lowerCamelCase ) self.assertIsNotNone(__lowerCamelCase ) @require_sentencepiece @require_tokenizers @require_torch class snake_case__ (unittest.TestCase ): """simple docstring""" @slow def __UpperCAmelCase ( self : str ) -> Union[str, Any]: a = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli" ) a = torch.tensor([[1, 2_94_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 13, 15_88, 2]] ) a = model(__lowerCamelCase )[0] a = torch.Size((1, 3) ) self.assertEqual(output.shape , __lowerCamelCase ) a = torch.tensor([[0.6_401, -0.0_349, -0.6_041]] ) self.assertTrue(torch.allclose(__lowerCamelCase , __lowerCamelCase , atol=1e-4 ) )
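# --- Hedged usage sketch (not part of the original test file) ---
# The slow integration test above, written out in plain form: run the
# MNLI-finetuned SqueezeBERT checkpoint on a pre-tokenized input and
# inspect the classification logits (one score per MNLI label).
import torch

from transformers import SqueezeBertForSequenceClassification

model = SqueezeBertForSequenceClassification.from_pretrained("squeezebert/squeezebert-mnli")
input_ids = torch.tensor([[1, 29414, 232, 328, 740, 1140, 12695, 69, 13, 1588, 2]])
with torch.no_grad():
    logits = model(input_ids)[0]
print(logits.shape)  # torch.Size([1, 3])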
107
from __future__ import annotations from scipy.special import comb # type: ignore class snake_case__ : """simple docstring""" def __init__( self : Any , __lowerCamelCase : list[tuple[float, float]] ) -> Tuple: a = list_of_points # Degree determines the flexibility of the curve. # Degree = 1 will produce a straight line. a = len(__lowerCamelCase ) - 1 def __UpperCAmelCase ( self : Union[str, Any] , __lowerCamelCase : float ) -> list[float]: assert 0 <= t <= 1, "Time t must be between 0 and 1." a = [] for i in range(len(self.list_of_points ) ): # basis function for each i output_values.append( comb(self.degree , __lowerCamelCase ) * ((1 - t) ** (self.degree - i)) * (t**i) ) # the basis must sum up to 1 for it to produce a valid Bezier curve. assert round(sum(__lowerCamelCase ) , 5 ) == 1 return output_values def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : float ) -> tuple[float, float]: assert 0 <= t <= 1, "Time t must be between 0 and 1." a = self.basis_function(__lowerCamelCase ) a = 0.0 a = 0.0 for i in range(len(self.list_of_points ) ): # For all points, sum up the product of i-th basis function and i-th point. x += basis_function[i] * self.list_of_points[i][0] y += basis_function[i] * self.list_of_points[i][1] return (x, y) def __UpperCAmelCase ( self : Optional[Any] , __lowerCamelCase : float = 0.01 ) -> List[str]: from matplotlib import pyplot as plt # type: ignore a = [] # x coordinates of points to plot a = [] # y coordinates of points to plot a = 0.0 while t <= 1: a = self.bezier_curve_function(__lowerCamelCase ) to_plot_x.append(value[0] ) to_plot_y.append(value[1] ) t += step_size a = [i[0] for i in self.list_of_points] a = [i[1] for i in self.list_of_points] plt.plot( __lowerCamelCase , __lowerCamelCase , color="blue" , label="Curve of Degree " + str(self.degree ) , ) plt.scatter(__lowerCamelCase , __lowerCamelCase , color="red" , label="Control Points" ) plt.legend() plt.show() if __name__ == "__main__": import doctest doctest.testmod() BezierCurve([(1, 2), (3, 5)]).plot_curve() # degree 1 BezierCurve([(0, 0), (5, 5), (5, 0)]).plot_curve() # degree 2 BezierCurve([(0, 0), (5, 5), (5, 0), (2.5, -2.5)]).plot_curve() # degree 3
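# --- Hedged usage sketch (not part of the original module) ---
# Evaluating a point on a quadratic Bezier curve directly with the
# Bernstein-polynomial formula the class above implements:
#   B(t) = sum_i C(n, i) * (1 - t)^(n - i) * t^i * P_i
from scipy.special import comb

points = [(0.0, 0.0), (5.0, 5.0), (5.0, 0.0)]  # control points of the degree-2 demo
n = len(points) - 1
t = 0.5
basis = [comb(n, i) * (1 - t) ** (n - i) * t**i for i in range(n + 1)]
x = sum(b * p[0] for b, p in zip(basis, points))
y = sum(b * p[1] for b, p in zip(basis, points))
print((x, y))  # (3.75, 2.5) -- the point at t = 0.5 on the quadratic curve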
107
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCamelCase_ : int = { """configuration_bloom""": ["""BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BloomConfig""", """BloomOnnxConfig"""], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ : List[str] = ["""BloomTokenizerFast"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ : Optional[int] = [ """BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST""", """BloomForCausalLM""", """BloomModel""", """BloomPreTrainedModel""", """BloomForSequenceClassification""", """BloomForTokenClassification""", """BloomForQuestionAnswering""", ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys lowerCamelCase_ : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
215
"""simple docstring""" from __future__ import annotations from typing import Generic, TypeVar lowerCamelCase_ : List[Any] = TypeVar("""T""") class __A ( Generic[T] ): """simple docstring""" def __init__( self , __A ) -> None: a =data a =self a =0 class __A ( Generic[T] ): """simple docstring""" def __init__( self ) -> None: # map from node name to the node object a ={} def SCREAMING_SNAKE_CASE ( self , __A ) -> None: # create a new set with x as its member a =DisjointSetTreeNode(__A ) def SCREAMING_SNAKE_CASE ( self , __A ) -> DisjointSetTreeNode[T]: # find the set x belongs to (with path-compression) a =self.map[data] if elem_ref != elem_ref.parent: a =self.find_set(elem_ref.parent.data ) return elem_ref.parent def SCREAMING_SNAKE_CASE ( self , __A , __A ) -> None: # helper function for union operation if nodea.rank > nodea.rank: a =nodea else: a =nodea if nodea.rank == nodea.rank: nodea.rank += 1 def SCREAMING_SNAKE_CASE ( self , __A , __A ) -> None: # merge 2 disjoint sets self.link(self.find_set(__A ) , self.find_set(__A ) ) class __A ( Generic[T] ): """simple docstring""" def __init__( self ) -> None: # connections: map from the node to the neighbouring nodes (with weights) a ={} def SCREAMING_SNAKE_CASE ( self , __A ) -> None: # add a node ONLY if its not present in the graph if node not in self.connections: a ={} def SCREAMING_SNAKE_CASE ( self , __A , __A , __A ) -> None: # add an edge with the given weight self.add_node(__A ) self.add_node(__A ) a =weight a =weight def SCREAMING_SNAKE_CASE ( self ) -> GraphUndirectedWeighted[T]: a =[] a =set() for start in self.connections: for end in self.connections[start]: if (start, end) not in seen: seen.add((end, start) ) edges.append((start, end, self.connections[start][end]) ) edges.sort(key=lambda __A : x[2] ) # creating the disjoint set a =DisjointSetTree[T]() for node in self.connections: disjoint_set.make_set(__A ) # MST generation a =0 a =0 a =GraphUndirectedWeighted[T]() while num_edges < len(self.connections ) - 1: a , a , a =edges[index] index += 1 a =disjoint_set.find_set(__A ) a =disjoint_set.find_set(__A ) if parent_u != parent_v: num_edges += 1 graph.add_edge(__A , __A , __A ) disjoint_set.union(__A , __A ) return graph
215
1
"""simple docstring""" # HF Trainer benchmarking tool # # This tool can be used to run and compare multiple dimensions of the HF Trainers args. # # It then prints a report once in github format with all the information that needs to be shared # with others and second time in a console-friendly format, so it's easier to use for tuning things up. # # The main idea is: # # ./trainer-benchmark.py --base-cmd '<cmd args that don't change>' \ # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' \ # --target-metric-key train_samples_per_second # # The variations can be any command line argument that you want to compare and not just dtype as in # the example. # # --variations allows you to compare variations in multiple dimensions. # # as the first dimention has 2 options and the second 3 in our example, this will run the trainer 6 # times adding one of: # # 1. --tf32 0 --fp16 0 # 2. --tf32 0 --fp16 1 # 3. --tf32 0 --bf16 1 # 4. --tf32 1 --fp16 0 # 5. --tf32 1 --fp16 1 # 6. --tf32 1 --bf16 1 # # and print the results. This is just a cartesian product - and more than 2 dimensions can be used. # # If you want to rely on defaults, this: # --variations '--tf32 0|--tf32 1' '--fp16 0|--fp16 1|--bf16 1' # is identical to this: # --variations '--tf32 0|--tf32 1' '|--fp16|--bf16' # # the leading empty variation in the 2nd dimension is a valid variation. # # So here we get the following 6 variations: # # 1. --tf32 0 # 2. --tf32 0 --fp16 # 3. --tf32 0 --bf16 # 4. --tf32 1 # 5. --tf32 1 --fp16 # 6. --tf32 1 --bf16 # # In this particular case we don't know what the default tf32 setting is as it's normally # pytorch-version dependent). That's why it's best to do an explicit setting of each variation: # `--tf32 0|--tf32 1` # # Here is a full example of a train: # # CUDA_VISIBLE_DEVICES=0 python ./scripts/benchmark/trainer-benchmark.py \ # --base-cmd \ # ' examples/pytorch/translation/run_translation.py --model_name_or_path t5-small \ # --output_dir output_dir --do_train --label_smoothing 0.1 --logging_strategy no \ # --save_strategy no --per_device_train_batch_size 32 --max_source_length 512 \ # --max_target_length 512 --num_train_epochs 1 --overwrite_output_dir \ # --source_lang en --target_lang ro --dataset_name wmt16 --dataset_config "ro-en" \ # --source_prefix "translate English to Romanian: " --warmup_steps 50 \ # --max_train_samples 20000 --dataloader_num_workers 2 ' \ # --target-metric-key train_samples_per_second --repeat-times 1 --variations \ # '|--fp16|--bf16' '--tf32 0|--tf32 1' --report-metric-keys train_loss \ # --repeat-times 1 --base-variation '--tf32 0' # # and here is a possible output: # # # | Variation | Train | Diff | Train | # | | samples | % | loss | # | | per | | | # | | second | | | # |:----------------|----------:|-------:|--------:| # | --tf32 0 | 285.11 | 0 | 2.51 | # | --tf32 1 | 342.09 | 20 | 2.51 | # | --fp16 --tf32 0 | 423.49 | 49 | 2.51 | # | --fp16 --tf32 1 | 423.13 | 48 | 2.51 | # | --bf16 --tf32 0 | 416.80 | 46 | 2.52 | # | --bf16 --tf32 1 | 415.87 | 46 | 2.52 | # # # So you can quickly compare the different outcomes. # # Typically running each experiment once is enough, but if the environment is unstable you can # re-run each multiple times, e.g., 3 using --repeat-times 3 and it will report the averaged results. 
# # By default it'll use the lowest result as the base line to use as 100% and then compare the rest to # it as can be seen from the table above, but you can also specify which combination is the one to use as # the baseline, e.g., to change to another entry use: --base-variation '--tf32 1 --fp16 0' # # --target-metric-key is there to tell the program which metrics to compare - the different metric keys are # inside output_dir/all_results.json. e.g., to measure eval performance instead of train use: # --target-metric-key eval_samples_per_second # but of course you will need to adjust the --base-cmd value in the example to perform evaluation as # well (as currently it doesn't) # import argparse import datetime import io import itertools import json import math import os import platform import re import shlex import subprocess import sys from pathlib import Path from statistics import fmean import pandas as pd import torch from tqdm import tqdm import transformers a :List[str] = float("nan") class __a : '''simple docstring''' def __init__( self , _a ) -> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = sys.stdout SCREAMING_SNAKE_CASE__ : Union[str, Any] = open(_a , """a""" ) def __getattr__( self , _a ) -> Optional[int]: """simple docstring""" return getattr(self.stdout , _a ) def _a ( self , _a ) -> Any: """simple docstring""" self.stdout.write(_a ) # strip tqdm codes self.file.write(re.sub(r"""^.*\r""" , """""" , _a , 0 , re.M ) ) def _lowercase ( __lowerCAmelCase=80 , __lowerCAmelCase=False ) -> List[str]: SCREAMING_SNAKE_CASE__ : Any = [] # deal with critical env vars SCREAMING_SNAKE_CASE__ : Optional[int] = ["""CUDA_VISIBLE_DEVICES"""] for key in env_keys: SCREAMING_SNAKE_CASE__ : List[Any] = os.environ.get(__lowerCAmelCase , __lowerCAmelCase ) if val is not None: cmd.append(F'''{key}={val}''' ) # python executable (not always needed if the script is executable) SCREAMING_SNAKE_CASE__ : Optional[int] = sys.executable if full_python_path else sys.executable.split("""/""" )[-1] cmd.append(__lowerCAmelCase ) # now the normal args cmd += list(map(shlex.quote , sys.argv ) ) # split up into up to MAX_WIDTH lines with shell multi-line escapes SCREAMING_SNAKE_CASE__ : Dict = [] SCREAMING_SNAKE_CASE__ : Optional[int] = """""" while len(__lowerCAmelCase ) > 0: current_line += F'''{cmd.pop(0 )} ''' if len(__lowerCAmelCase ) == 0 or len(__lowerCAmelCase ) + len(cmd[0] ) + 1 > max_width - 1: lines.append(__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : List[str] = """""" return "\\\n".join(__lowerCAmelCase ) def _lowercase ( __lowerCAmelCase , __lowerCAmelCase ) -> List[Any]: # unwrap multi-line input SCREAMING_SNAKE_CASE__ : int = re.sub(r"""[\\\n]+""" , """ """ , args.base_cmd ) # remove --output_dir if any and set our own SCREAMING_SNAKE_CASE__ : Any = re.sub("""--output_dir\s+[^\s]+""" , """""" , args.base_cmd ) args.base_cmd += F''' --output_dir {output_dir}''' # ensure we have --overwrite_output_dir SCREAMING_SNAKE_CASE__ : Any = re.sub("""--overwrite_output_dir\s+""" , """""" , args.base_cmd ) args.base_cmd += " --overwrite_output_dir" return [sys.executable] + shlex.split(args.base_cmd ) def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> Tuple: # Enable to debug everything but the run itself, to do it fast and see the progress. 
# This is useful for debugging the output formatting quickly - we can remove it later once # everybody is happy with the output if 0: import random from time import sleep sleep(0 ) return dict( {k: random.uniform(0 , 100 ) for k in metric_keys} , **{target_metric_key: random.choice([nan, 10.31, 100.2, 55.6_666, 222.22_222_222] )} , ) SCREAMING_SNAKE_CASE__ : Optional[int] = subprocess.run(__lowerCAmelCase , capture_output=__lowerCAmelCase , text=__lowerCAmelCase ) if verbose: print("""STDOUT""" , result.stdout ) print("""STDERR""" , result.stderr ) # save the streams SCREAMING_SNAKE_CASE__ : Optional[int] = variation.replace(""" """ , """-""" ) with open(Path(__lowerCAmelCase ) / F'''log.{prefix}.stdout.txt''' , """w""" ) as f: f.write(result.stdout ) with open(Path(__lowerCAmelCase ) / F'''log.{prefix}.stderr.txt''' , """w""" ) as f: f.write(result.stderr ) if result.returncode != 0: if verbose: print("""failed""" ) return {target_metric_key: nan} with io.open(F'''{output_dir}/all_results.json''' , """r""" , encoding="""utf-8""" ) as f: SCREAMING_SNAKE_CASE__ : Optional[Any] = json.load(__lowerCAmelCase ) # filter out just the keys we want return {k: v for k, v in metrics.items() if k in metric_keys} def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , ) -> List[str]: SCREAMING_SNAKE_CASE__ : Optional[int] = [] SCREAMING_SNAKE_CASE__ : Tuple = [] SCREAMING_SNAKE_CASE__ : str = F'''{id}: {variation:<{longest_variation_len}}''' SCREAMING_SNAKE_CASE__ : Tuple = F'''{preamble}: ''' SCREAMING_SNAKE_CASE__ : List[Any] = set(report_metric_keys + [target_metric_key] ) for i in tqdm(range(__lowerCAmelCase ) , desc=__lowerCAmelCase , leave=__lowerCAmelCase ): SCREAMING_SNAKE_CASE__ : Tuple = process_run_single( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : Tuple = single_run_metrics[target_metric_key] if not math.isnan(__lowerCAmelCase ): metrics.append(__lowerCAmelCase ) results.append(__lowerCAmelCase ) outcome += "✓" else: outcome += "✘" SCREAMING_SNAKE_CASE__ : int = F'''\33[2K\r{outcome}''' if len(__lowerCAmelCase ) > 0: SCREAMING_SNAKE_CASE__ : int = {k: fmean([x[k] for x in metrics] ) for k in metrics[0].keys()} SCREAMING_SNAKE_CASE__ : Optional[int] = round(mean_metrics[target_metric_key] , 2 ) SCREAMING_SNAKE_CASE__ : Dict = F'''{outcome} {mean_target}''' if len(__lowerCAmelCase ) > 1: results_str += F''' {tuple(round(__lowerCAmelCase , 2 ) for x in results )}''' print(__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[int] = variation return mean_metrics else: print(__lowerCAmelCase ) return {variation_key: variation, target_metric_key: nan} def _lowercase ( ) -> Union[str, Any]: SCREAMING_SNAKE_CASE__ : Dict = torch.cuda.get_device_properties(torch.device("""cuda""" ) ) return F''' Datetime : {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )} Software: transformers: {transformers.__version__} torch : {torch.__version__} cuda : {torch.version.cuda} python : {platform.python_version()} Hardware: {torch.cuda.device_count()} GPUs : {properties.name}, {properties.total_memory/2**30:0.2f}GB ''' def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) -> List[str]: SCREAMING_SNAKE_CASE__ : Tuple = pd.DataFrame(__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : Optional[Any] = 
"""variation""" SCREAMING_SNAKE_CASE__ : Optional[int] = """diff_%""" SCREAMING_SNAKE_CASE__ : Any = nan if base_variation is not None and len(df[df[variation_key] == base_variation] ): # this may still return nan SCREAMING_SNAKE_CASE__ : Dict = df.loc[df[variation_key] == base_variation][target_metric_key].item() if math.isnan(__lowerCAmelCase ): # as a fallback, use the minimal value as the sentinel SCREAMING_SNAKE_CASE__ : Any = df.loc[df[target_metric_key] != nan][target_metric_key].min() # create diff column if possible if not math.isnan(__lowerCAmelCase ): SCREAMING_SNAKE_CASE__ : List[Any] = df.apply( lambda __lowerCAmelCase : round(100 * (r[target_metric_key] - sentinel_value) / sentinel_value ) if not math.isnan(r[target_metric_key] ) else 0 , axis="""columns""" , ) # re-order columns SCREAMING_SNAKE_CASE__ : Any = [variation_key, target_metric_key, diff_key, *report_metric_keys] SCREAMING_SNAKE_CASE__ : Optional[Any] = df.reindex(__lowerCAmelCase , axis="""columns""" ) # reorder cols # capitalize SCREAMING_SNAKE_CASE__ : List[Any] = df.rename(str.capitalize , axis="""columns""" ) # make the cols as narrow as possible SCREAMING_SNAKE_CASE__ : Any = df.rename(lambda __lowerCAmelCase : c.replace("""_""" , """<br>""" ) , axis="""columns""" ) SCREAMING_SNAKE_CASE__ : int = df.rename(lambda __lowerCAmelCase : c.replace("""_""" , """\n""" ) , axis="""columns""" ) SCREAMING_SNAKE_CASE__ : str = ["""""", """Copy between the cut-here-lines and paste as is to github or a forum"""] report += ["----------8<-----------------8<--------"] report += ["*** Results:", df_github.to_markdown(index=__lowerCAmelCase , floatfmt=""".2f""" )] report += ["```"] report += ["*** Setup:", get_versions()] report += ["*** The benchmark command line was:", get_original_command()] report += ["```"] report += ["----------8<-----------------8<--------"] report += ["*** Results (console):", df_console.to_markdown(index=__lowerCAmelCase , floatfmt=""".2f""" )] print("""\n\n""".join(__lowerCAmelCase ) ) def _lowercase ( ) -> Dict: SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser() parser.add_argument( """--base-cmd""" , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""Base cmd""" , ) parser.add_argument( """--variations""" , default=__lowerCAmelCase , type=__lowerCAmelCase , nargs="""+""" , required=__lowerCAmelCase , help="""Multi-dimensional variations, example: '|--fp16|--bf16' '|--tf32'""" , ) parser.add_argument( """--base-variation""" , default=__lowerCAmelCase , type=__lowerCAmelCase , help="""Baseline variation to compare to. if None the minimal target value will be used to compare against""" , ) parser.add_argument( """--target-metric-key""" , default=__lowerCAmelCase , type=__lowerCAmelCase , required=__lowerCAmelCase , help="""Target metric key in output_dir/all_results.json, e.g., train_samples_per_second""" , ) parser.add_argument( """--report-metric-keys""" , default="""""" , type=__lowerCAmelCase , help="""Report metric keys - other metric keys from output_dir/all_results.json to report, e.g., train_loss. 
Use a single argument e.g., 'train_loss train_samples""" , ) parser.add_argument( """--repeat-times""" , default=1 , type=__lowerCAmelCase , help="""How many times to re-run each variation - an average will be reported""" , ) parser.add_argument( """--output_dir""" , default="""output_benchmark""" , type=__lowerCAmelCase , help="""The output directory where all the benchmark reports will go to and additionally this directory will be used to override --output_dir in the script that is being benchmarked""" , ) parser.add_argument( """--verbose""" , default=__lowerCAmelCase , action="""store_true""" , help="""Whether to show the outputs of each run or just the benchmark progress""" , ) SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args() SCREAMING_SNAKE_CASE__ : Any = args.output_dir Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase ) SCREAMING_SNAKE_CASE__ : Any = get_base_command(__lowerCAmelCase , __lowerCAmelCase ) # split each dimension into its --foo variations SCREAMING_SNAKE_CASE__ : Dict = [list(map(str.strip , re.split(r"""\|""" , __lowerCAmelCase ) ) ) for x in args.variations] # build a cartesian product of dimensions and convert those back into cmd-line arg strings, # while stripping white space for inputs that were empty SCREAMING_SNAKE_CASE__ : str = list(map(str.strip , map(""" """.join , itertools.product(*__lowerCAmelCase ) ) ) ) SCREAMING_SNAKE_CASE__ : Optional[int] = max(len(__lowerCAmelCase ) for x in variations ) # split wanted keys SCREAMING_SNAKE_CASE__ : int = args.report_metric_keys.split() # capture prints into a log file for convenience SCREAMING_SNAKE_CASE__ : Optional[Any] = F'''benchmark-report-{datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S' )}.txt''' print(F'''\nNote: each run\'s output is also logged under {output_dir}/log.*.std*.txt''' ) print(F'''and this script\'s output is also piped into {report_fn}''' ) SCREAMING_SNAKE_CASE__ : Any = Tee(__lowerCAmelCase ) print(F'''\n*** Running {len(__lowerCAmelCase )} benchmarks:''' ) print(F'''Base command: {' '.join(__lowerCAmelCase )}''' ) SCREAMING_SNAKE_CASE__ : str = """variation""" SCREAMING_SNAKE_CASE__ : int = [] for id, variation in enumerate(tqdm(__lowerCAmelCase , desc="""Total completion: """ , leave=__lowerCAmelCase ) ): SCREAMING_SNAKE_CASE__ : Tuple = base_cmd + variation.split() results.append( process_run( id + 1 , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , args.target_metric_key , __lowerCAmelCase , args.repeat_times , __lowerCAmelCase , args.verbose , ) ) process_results(__lowerCAmelCase , args.target_metric_key , __lowerCAmelCase , args.base_variation , __lowerCAmelCase ) if __name__ == "__main__": main()
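# --- Hedged usage sketch (not part of the original script) ---
# The core of the benchmark tool above: each `--variations` argument is
# split on `|` into one dimension, and the dimensions are combined with a
# cartesian product to produce the full list of runs.
import itertools
import re

variations_args = ["--tf32 0|--tf32 1", "|--fp16|--bf16"]
dims = [[x.strip() for x in re.split(r"\|", v)] for v in variations_args]
variations = [" ".join(combo).strip() for combo in itertools.product(*dims)]
print(variations)
# ['--tf32 0', '--tf32 0 --fp16', '--tf32 0 --bf16',
#  '--tf32 1', '--tf32 1 --fp16', '--tf32 1 --bf16']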
132
"""simple docstring""" import math_equivalence # From: git+https://github.com/hendrycks/math.git import datasets a :str = "\\n@article{hendrycksmath2021,\n title={Measuring Mathematical Problem Solving With the MATH Dataset},\n author={Dan Hendrycks\n and Collin Burns\n and Saurav Kadavath\n and Akul Arora\n and Steven Basart\n and Eric Tang\n and Dawn Song\n and Jacob Steinhardt},\n journal={arXiv preprint arXiv:2103.03874},\n year={2021}\n}\n" a :List[Any] = "\\nThis metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.\nIt first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.\n" a :int = r"\nCalculates accuracy after canonicalizing inputs.\n\nArgs:\n predictions: list of predictions to score. Each prediction\n is a string that contains natural language and LaTex.\n references: list of reference for each prediction. Each\n reference is a string that contains natural language\n and LaTex.\nReturns:\n accuracy: accuracy after canonicalizing inputs\n (e.g., converting \"1/2\" to \"\\frac{1}{2}\")\n\nExamples:\n >>> metric = datasets.load_metric(\"competition_math\")\n >>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])\n >>> print(results)\n {'accuracy': 1.0}\n" @datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION) class __a (datasets.Metric): '''simple docstring''' def _a ( self ) -> List[Any]: """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" ), """references""": datasets.Value("""string""" ), } ) , homepage="""https://github.com/hendrycks/math""" , codebase_urls=["""https://github.com/hendrycks/math"""] , ) def _a ( self , _a , _a ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = 0.0 for i, j in zip(_a , _a ): n_correct += 1.0 if math_equivalence.is_equiv(_a , _a ) else 0.0 SCREAMING_SNAKE_CASE__ : List[str] = n_correct / len(_a ) return { "accuracy": accuracy, }
132
1
"""simple docstring""" lowercase__ = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []} lowercase__ = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]} def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->list[int]: a__: List[Any] = True a__: Dict = [] for neighbour in graph[vert]: if not visited[neighbour]: order += topology_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) order.append(_SCREAMING_SNAKE_CASE ) return order def __a ( _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->list[int]: a__: int = True a__: str = [vert] for neighbour in reversed_graph[vert]: if not visited[neighbour]: component += find_components(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return component def __a ( _SCREAMING_SNAKE_CASE ) ->list[list[int]]: a__: Optional[int] = len(_SCREAMING_SNAKE_CASE ) * [False] a__: dict[int, list[int]] = {vert: [] for vert in range(len(_SCREAMING_SNAKE_CASE ) )} for vert, neighbours in graph.items(): for neighbour in neighbours: reversed_graph[neighbour].append(_SCREAMING_SNAKE_CASE ) a__: int = [] for i, was_visited in enumerate(_SCREAMING_SNAKE_CASE ): if not was_visited: order += topology_sort(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) a__: int = [] a__: Optional[int] = len(_SCREAMING_SNAKE_CASE ) * [False] for i in range(len(_SCREAMING_SNAKE_CASE ) ): a__: Union[str, Any] = order[len(_SCREAMING_SNAKE_CASE ) - i - 1] if not visited[vert]: a__: Dict = find_components(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) components_list.append(_SCREAMING_SNAKE_CASE ) return components_list
364
"""simple docstring""" import collections from typing import List, Optional, Union from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging from ..bert.tokenization_bert import BertTokenizer lowercase__ = logging.get_logger(__name__) lowercase__ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'} lowercase__ = { 'vocab_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-ctx_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-ctx_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } lowercase__ = { 'vocab_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-question_encoder-single-nq-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-question_encoder-multiset-base': ( 'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json' ), }, } lowercase__ = { 'vocab_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt' ), }, 'tokenizer_file': { 'facebook/dpr-reader-single-nq-base': ( 'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json' ), 'facebook/dpr-reader-multiset-base': ( 'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json' ), }, } lowercase__ = { 'facebook/dpr-ctx_encoder-single-nq-base': 512, 'facebook/dpr-ctx_encoder-multiset-base': 512, } lowercase__ = { 'facebook/dpr-question_encoder-single-nq-base': 512, 'facebook/dpr-question_encoder-multiset-base': 512, } lowercase__ = { 'facebook/dpr-reader-single-nq-base': 512, 'facebook/dpr-reader-multiset-base': 512, } lowercase__ = { 'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True}, } lowercase__ = { 'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True}, } lowercase__ = { 'facebook/dpr-reader-single-nq-base': {'do_lower_case': True}, 'facebook/dpr-reader-multiset-base': {'do_lower_case': True}, } class __snake_case ( __lowerCAmelCase ): a__ = VOCAB_FILES_NAMES a__ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP a__ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION class __snake_case ( __lowerCAmelCase ): a__ = VOCAB_FILES_NAMES a__ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP a__ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION lowercase__ = collections.namedtuple( 'DPRSpanPrediction', ['span_score', 
'relevance_score', 'doc_id', 'start_index', 'end_index', 'text'] ) lowercase__ = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits']) lowercase__ = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. 
If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n ' @add_start_docstrings(__lowerCAmelCase ) class __snake_case : def __call__( self , lowercase , lowercase = None , lowercase = None , lowercase = False , lowercase = False , lowercase = None , lowercase = None , lowercase = None , **lowercase , ) -> BatchEncoding: '''simple docstring''' if titles is None and texts is None: return super().__call__( lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , return_tensors=lowercase , return_attention_mask=lowercase , **lowercase , ) elif titles is None or texts is None: a__: str = titles if texts is None else texts return super().__call__( lowercase , lowercase , padding=lowercase , truncation=lowercase , max_length=lowercase , return_tensors=lowercase , return_attention_mask=lowercase , **lowercase , ) a__: Tuple = titles if not isinstance(lowercase , lowercase) else [titles] a__: Optional[int] = texts if not isinstance(lowercase , lowercase) else [texts] a__: Dict = len(lowercase) a__: Dict = questions if not isinstance(lowercase , lowercase) else [questions] * n_passages if len(lowercase) != len(lowercase): raise ValueError( f'There should be as many titles than texts but got {len(lowercase)} titles and {len(lowercase)} texts.') a__: List[str] = super().__call__(lowercase , lowercase , padding=lowercase , truncation=lowercase)['input_ids'] a__: List[Any] = super().__call__(lowercase , add_special_tokens=lowercase , padding=lowercase , truncation=lowercase)['input_ids'] a__: Optional[Any] = { 'input_ids': [ (encoded_question_and_title + encoded_text)[:max_length] if max_length is not None and truncation else encoded_question_and_title + encoded_text for encoded_question_and_title, encoded_text in zip(lowercase , lowercase) ] } if return_attention_mask is not False: a__: Dict = [] for input_ids in encoded_inputs["input_ids"]: attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids]) a__: int = attention_mask return self.pad(lowercase , padding=lowercase , max_length=lowercase , return_tensors=lowercase) def lowerCamelCase_ ( self , lowercase , lowercase , lowercase = 16 , lowercase = 64 , lowercase = 4 , ) -> List[DPRSpanPrediction]: '''simple docstring''' a__: Dict = reader_input['input_ids'] a__ , a__ , a__: Tuple = reader_output[:3] a__: Tuple = len(lowercase) a__: Optional[Any] = sorted(range(lowercase) , reverse=lowercase , key=relevance_logits.__getitem__) a__: List[DPRReaderOutput] = [] for doc_id in sorted_docs: a__: Tuple = list(input_ids[doc_id]) # assuming question & title information is at the beginning of the sequence a__: Dict = 
sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id if sequence_ids[-1] == self.pad_token_id: a__: Any = sequence_ids.index(self.pad_token_id) else: a__: Optional[Any] = len(lowercase) a__: Optional[int] = self._get_best_spans( start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=lowercase , top_spans=lowercase , ) for start_index, end_index in best_spans: start_index += passage_offset end_index += passage_offset nbest_spans_predictions.append( DPRSpanPrediction( span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=lowercase , start_index=lowercase , end_index=lowercase , text=self.decode(sequence_ids[start_index : end_index + 1]) , )) if len(lowercase) >= num_spans: break return nbest_spans_predictions[:num_spans] def lowerCamelCase_ ( self , lowercase , lowercase , lowercase , lowercase , ) -> List[DPRSpanPrediction]: '''simple docstring''' a__: Optional[Any] = [] for start_index, start_score in enumerate(lowercase): for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]): scores.append(((start_index, start_index + answer_length), start_score + end_score)) a__: str = sorted(lowercase , key=lambda lowercase: x[1] , reverse=lowercase) a__: Optional[Any] = [] for (start_index, end_index), score in scores: if start_index > end_index: raise ValueError(f'Wrong span indices: [{start_index}:{end_index}]') a__: str = end_index - start_index + 1 if length > max_answer_length: raise ValueError(f'Span is too long: {length} > {max_answer_length}') if any( start_index <= prev_start_index <= prev_end_index <= end_index or prev_start_index <= start_index <= end_index <= prev_end_index for (prev_start_index, prev_end_index) in chosen_span_intervals): continue chosen_span_intervals.append((start_index, end_index)) if len(lowercase) == top_spans: break return chosen_span_intervals @add_end_docstrings(__lowerCAmelCase ) class __snake_case ( __lowerCAmelCase , __lowerCAmelCase ): a__ = VOCAB_FILES_NAMES a__ = READER_PRETRAINED_VOCAB_FILES_MAP a__ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a__ = READER_PRETRAINED_INIT_CONFIGURATION a__ = ["""input_ids""", """attention_mask"""]
203
0
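The reader-tokenizer row above scores every candidate answer span as start_logit + end_logit, sorts the candidates, and keeps only spans that do not nest inside (or contain) a span already chosen. A minimal standalone sketch of that selection rule follows; the names are illustrative and this is not the transformers API, just the same loop structure with readable identifiers.

def best_spans(start_logits, end_logits, max_answer_length=5, top_spans=2):
    # score each (start, end) pair within the allowed answer length
    scores = []
    for start, start_score in enumerate(start_logits):
        for offset, end_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + offset), start_score + end_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start, end), _score in scores:
        # drop a candidate that nests with an already chosen span
        if any(s <= start <= end <= e or start <= s <= e <= end for s, e in chosen):
            continue
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen

print(best_spans([0.1, 2.0, 0.3], [0.2, 0.1, 1.5]))  # [(1, 2), (0, 0)]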
from ...configuration_utils import PretrainedConfig


class _lowercase (a_ ):
    '''simple docstring'''

    lowercase__ = """bert-generation"""

    def __init__( self , snake_case__=5_0358 , snake_case__=1024 , snake_case__=24 , snake_case__=16 , snake_case__=4096 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=512 , snake_case__=0.02 , snake_case__=1e-12 , snake_case__=0 , snake_case__=2 , snake_case__=1 , snake_case__="absolute" , snake_case__=True , **snake_case__ , ):
        '''simple docstring'''
        super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ )

        UpperCamelCase_ = vocab_size
        UpperCamelCase_ = hidden_size
        UpperCamelCase_ = num_hidden_layers
        UpperCamelCase_ = num_attention_heads
        UpperCamelCase_ = hidden_act
        UpperCamelCase_ = intermediate_size
        UpperCamelCase_ = hidden_dropout_prob
        UpperCamelCase_ = attention_probs_dropout_prob
        UpperCamelCase_ = max_position_embeddings
        UpperCamelCase_ = initializer_range
        UpperCamelCase_ = layer_norm_eps
        UpperCamelCase_ = position_embedding_type
        UpperCamelCase_ = use_cache
128
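The configuration class above follows the usual PretrainedConfig pattern: every constructor keyword becomes an attribute, and the special-token ids plus any unknown keywords are forwarded to the base class. A toy illustration of that pattern is sketched below; it is a simplified stand-in, not the real transformers base class.

class ToyConfig:
    """Constructor kwargs become attributes; unknown kwargs are absorbed too."""

    model_type = "toy"

    def __init__(self, vocab_size=50358, hidden_size=1024, **kwargs):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        for key, value in kwargs.items():
            setattr(self, key, value)

cfg = ToyConfig(hidden_size=512, use_cache=False)
print(cfg.vocab_size, cfg.hidden_size, cfg.use_cache)  # 50358 512 False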
from typing import Dict, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import flip_channel_order, resize, to_channel_dimension_format, to_pil_image from ...image_utils import ( ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends if is_vision_available(): import PIL # soft dependency if is_pytesseract_available(): import pytesseract UpperCAmelCase : List[Any] =logging.get_logger(__name__) def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase): return [ int(10_00 * (box[0] / width)), int(10_00 * (box[1] / height)), int(10_00 * (box[2] / width)), int(10_00 * (box[3] / height)), ] def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None): UpperCamelCase_ = tesseract_config if tesseract_config is not None else "" # apply OCR UpperCamelCase_ = to_pil_image(_lowerCAmelCase) UpperCamelCase_ , UpperCamelCase_ = pil_image.size UpperCamelCase_ = pytesseract.image_to_data(_lowerCAmelCase , lang=_lowerCAmelCase , output_type="dict" , config=_lowerCAmelCase) UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = data["text"], data["left"], data["top"], data["width"], data["height"] # filter empty words and corresponding coordinates UpperCamelCase_ = [idx for idx, word in enumerate(_lowerCAmelCase) if not word.strip()] UpperCamelCase_ = [word for idx, word in enumerate(_lowerCAmelCase) if idx not in irrelevant_indices] UpperCamelCase_ = [coord for idx, coord in enumerate(_lowerCAmelCase) if idx not in irrelevant_indices] UpperCamelCase_ = [coord for idx, coord in enumerate(_lowerCAmelCase) if idx not in irrelevant_indices] UpperCamelCase_ = [coord for idx, coord in enumerate(_lowerCAmelCase) if idx not in irrelevant_indices] UpperCamelCase_ = [coord for idx, coord in enumerate(_lowerCAmelCase) if idx not in irrelevant_indices] # turn coordinates into (left, top, left+width, top+height) format UpperCamelCase_ = [] for x, y, w, h in zip(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase): UpperCamelCase_ = [x, y, x + w, y + h] actual_boxes.append(_lowerCAmelCase) # finally, normalize the bounding boxes UpperCamelCase_ = [] for box in actual_boxes: normalized_boxes.append(normalize_box(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)) assert len(_lowerCAmelCase) == len(_lowerCAmelCase), "Not as many words as there are bounding boxes" return words, normalized_boxes class _lowercase (a_ ): '''simple docstring''' lowercase__ = ["""pixel_values"""] def __init__( self , snake_case__ = True , snake_case__ = None , snake_case__ = PILImageResampling.BILINEAR , snake_case__ = True , snake_case__ = None , snake_case__ = "" , **snake_case__ , ): '''simple docstring''' super().__init__(**snake_case__ ) UpperCamelCase_ = size if size is not None else {"height": 224, "width": 224} UpperCamelCase_ = get_size_dict(snake_case__ ) UpperCamelCase_ = do_resize UpperCamelCase_ = size UpperCamelCase_ = resample UpperCamelCase_ = apply_ocr UpperCamelCase_ = ocr_lang UpperCamelCase_ = tesseract_config def _lowerCamelCase ( self , snake_case__ , snake_case__ , snake_case__ = PILImageResampling.BILINEAR , snake_case__ = None , **snake_case__ , ): '''simple docstring''' UpperCamelCase_ = get_size_dict(snake_case__ ) if "height" not in size or "width" not in size: raise 
ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" ) UpperCamelCase_ = (size["height"], size["width"]) return resize(snake_case__ , size=snake_case__ , resample=snake_case__ , data_format=snake_case__ , **snake_case__ ) def _lowerCamelCase ( self , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = ChannelDimension.FIRST , **snake_case__ , ): '''simple docstring''' UpperCamelCase_ = do_resize if do_resize is not None else self.do_resize UpperCamelCase_ = size if size is not None else self.size UpperCamelCase_ = get_size_dict(snake_case__ ) UpperCamelCase_ = resample if resample is not None else self.resample UpperCamelCase_ = apply_ocr if apply_ocr is not None else self.apply_ocr UpperCamelCase_ = ocr_lang if ocr_lang is not None else self.ocr_lang UpperCamelCase_ = tesseract_config if tesseract_config is not None else self.tesseract_config UpperCamelCase_ = make_list_of_images(snake_case__ ) if not valid_images(snake_case__ ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) # All transformations expect numpy arrays. UpperCamelCase_ = [to_numpy_array(snake_case__ ) for image in images] if apply_ocr: requires_backends(self , "pytesseract" ) UpperCamelCase_ = [] UpperCamelCase_ = [] for image in images: UpperCamelCase_ , UpperCamelCase_ = apply_tesseract(snake_case__ , snake_case__ , snake_case__ ) words_batch.append(snake_case__ ) boxes_batch.append(snake_case__ ) if do_resize: UpperCamelCase_ = [self.resize(image=snake_case__ , size=snake_case__ , resample=snake_case__ ) for image in images] # flip color channels from RGB to BGR (as Detectron2 requires this) UpperCamelCase_ = [flip_channel_order(snake_case__ ) for image in images] UpperCamelCase_ = [to_channel_dimension_format(snake_case__ , snake_case__ ) for image in images] UpperCamelCase_ = BatchFeature(data={"pixel_values": images} , tensor_type=snake_case__ ) if apply_ocr: UpperCamelCase_ = words_batch UpperCamelCase_ = boxes_batch return data
128
1
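The OCR helper in the image-processor row above rescales pixel-space bounding boxes onto a fixed 0-1000 grid, so that downstream layout models see resolution-independent coordinates. The same arithmetic with readable names, plus a quick numeric check (the helper name is illustrative, matching the normalize_box logic above):

def normalize_box(box, width, height):
    # box is (left, top, right, bottom) in pixels; output is on a 0-1000 grid
    return [
        int(1000 * (box[0] / width)),
        int(1000 * (box[1] / height)),
        int(1000 * (box[2] / width)),
        int(1000 * (box[3] / height)),
    ]

print(normalize_box([10, 20, 110, 220], 200, 400))  # [50, 50, 550, 550]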
import unittest import numpy as np from transformers import RobertaPreLayerNormConfig, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask if is_flax_available(): import jax.numpy as jnp from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import ( FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormModel, ) class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): def __init__( self : Optional[Any] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any]=13 , lowerCamelCase_ : Optional[Any]=7 , lowerCamelCase_ : Dict=True , lowerCamelCase_ : Any=True , lowerCamelCase_ : List[str]=True , lowerCamelCase_ : int=True , lowerCamelCase_ : Tuple=99 , lowerCamelCase_ : List[Any]=32 , lowerCamelCase_ : int=5 , lowerCamelCase_ : int=4 , lowerCamelCase_ : List[str]=37 , lowerCamelCase_ : str="gelu" , lowerCamelCase_ : Optional[int]=0.1 , lowerCamelCase_ : List[str]=0.1 , lowerCamelCase_ : Tuple=512 , lowerCamelCase_ : Any=16 , lowerCamelCase_ : Tuple=2 , lowerCamelCase_ : int=0.0_2 , lowerCamelCase_ : Any=4 , ): """simple docstring""" UpperCamelCase = parent UpperCamelCase = batch_size UpperCamelCase = seq_length UpperCamelCase = is_training UpperCamelCase = use_attention_mask UpperCamelCase = use_token_type_ids UpperCamelCase = use_labels UpperCamelCase = vocab_size UpperCamelCase = hidden_size UpperCamelCase = num_hidden_layers UpperCamelCase = num_attention_heads UpperCamelCase = intermediate_size UpperCamelCase = hidden_act UpperCamelCase = hidden_dropout_prob UpperCamelCase = attention_probs_dropout_prob UpperCamelCase = max_position_embeddings UpperCamelCase = type_vocab_size UpperCamelCase = type_sequence_label_size UpperCamelCase = initializer_range UpperCamelCase = num_choices def lowerCamelCase_ ( self : Optional[Any] ): """simple docstring""" UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCamelCase = None if self.use_attention_mask: UpperCamelCase = random_attention_mask([self.batch_size, self.seq_length] ) UpperCamelCase = None if self.use_token_type_ids: UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCamelCase = RobertaPreLayerNormConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCamelCase_ , initializer_range=self.initializer_range , ) return config, input_ids, token_type_ids, attention_mask def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask} return config, inputs_dict def lowerCamelCase_ ( self : Any ): """simple docstring""" UpperCamelCase = 
self.prepare_config_and_inputs() UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = config_and_inputs UpperCamelCase = True UpperCamelCase = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] ) UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 ) return ( config, input_ids, token_type_ids, encoder_hidden_states, encoder_attention_mask, ) @require_flax # Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40 class SCREAMING_SNAKE_CASE_ ( __lowerCAmelCase , unittest.TestCase ): __lowerCAmelCase = True __lowerCAmelCase = ( ( FlaxRobertaPreLayerNormModel, FlaxRobertaPreLayerNormForCausalLM, FlaxRobertaPreLayerNormForMaskedLM, FlaxRobertaPreLayerNormForSequenceClassification, FlaxRobertaPreLayerNormForTokenClassification, FlaxRobertaPreLayerNormForMultipleChoice, FlaxRobertaPreLayerNormForQuestionAnswering, ) if is_flax_available() else () ) def lowerCamelCase_ ( self : str ): """simple docstring""" UpperCamelCase = FlaxRobertaPreLayerNormModelTester(self ) @slow def lowerCamelCase_ ( self : str ): """simple docstring""" for model_class_name in self.all_model_classes: UpperCamelCase = model_class_name.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=lowerCamelCase_ ) UpperCamelCase = model(np.ones((1, 1) ) ) self.assertIsNotNone(lowerCamelCase_ ) @require_flax class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ): @slow def lowerCamelCase_ ( self : Union[str, Any] ): """simple docstring""" UpperCamelCase = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=lowerCamelCase_ ) UpperCamelCase = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa ) UpperCamelCase = model(lowerCamelCase_ )[0] UpperCamelCase = [1, 11, 5_0265] self.assertEqual(list(output.shape ) , lowerCamelCase_ ) # compare the actual values for a slice. UpperCamelCase = np.array( [[[4_0.4_8_8_0, 1_8.0_1_9_9, -5.2_3_6_7], [-1.8_8_7_7, -4.0_8_8_5, 1_0.7_0_8_5], [-2.2_6_1_3, -5.6_1_1_0, 7.2_6_6_5]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 ) ) @slow def lowerCamelCase_ ( self : Optional[int] ): """simple docstring""" UpperCamelCase = FlaxRobertaPreLayerNormModel.from_pretrained("""andreasmadsen/efficient_mlm_m0.40""" , from_pt=lowerCamelCase_ ) UpperCamelCase = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.intaa ) UpperCamelCase = model(lowerCamelCase_ )[0] # compare the actual values for a slice. UpperCamelCase = np.array( [[[0.0_2_0_8, -0.0_3_5_6, 0.0_2_3_7], [-0.1_5_6_9, -0.0_4_1_1, -0.2_6_2_6], [0.1_8_7_9, 0.0_1_2_5, -0.0_0_8_9]]] , dtype=np.floataa ) self.assertTrue(np.allclose(output[:, :3, :3] , lowerCamelCase_ , atol=1E-4 ) )
165
def lowercase( UpperCamelCase_ = 1000000 ) -> int:
    '''simple docstring'''
    UpperCamelCase = [i - 1 for i in range(limit + 1 )]

    for i in range(2 , limit + 1 ):
        if phi[i] == i - 1:
            for j in range(2 * i , limit + 1 , UpperCamelCase_ ):
                phi[j] -= phi[j] // i

    return sum(phi[2 : limit + 1] )


if __name__ == "__main__":
    print(solution())
165
1
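The sieve above computes Euler's totient for every value up to the limit: it starts from phi[i] = i - 1 (already correct for primes, which is how primes are detected), then for each prime i subtracts phi[j] // i from every multiple j; the result is the sum of phi over 2..limit. The same sieve with readable names and a small check (the totients of 2..8 are 1, 2, 2, 4, 2, 6, 4, which sum to 21):

def totient_sum(limit):
    phi = [i - 1 for i in range(limit + 1)]        # phi[p] = p - 1 holds for primes
    for i in range(2, limit + 1):
        if phi[i] == i - 1:                        # i is prime: value untouched so far
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i              # discount multiples sharing factor i
    return sum(phi[2 : limit + 1])

assert totient_sum(8) == 1 + 2 + 2 + 4 + 2 + 6 + 4  # == 21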
"""simple docstring""" import sys from collections import defaultdict class A__ : def __init__( self ): __lowerCAmelCase : Optional[Any] = [] def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ): return self.node_position[vertex] def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Union[str, Any] = pos def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): if start > size // 2 - 1: return else: if 2 * start + 2 >= size: __lowerCAmelCase : Union[str, Any] = 2 * start + 1 else: if heap[2 * start + 1] < heap[2 * start + 2]: __lowerCAmelCase : str = 2 * start + 1 else: __lowerCAmelCase : Dict = 2 * start + 2 if heap[smallest_child] < heap[start]: __lowerCAmelCase , __lowerCAmelCase : str = heap[smallest_child], positions[smallest_child] __lowerCAmelCase , __lowerCAmelCase : Optional[int] = ( heap[start], positions[start], ) __lowerCAmelCase , __lowerCAmelCase : Any = temp, tempa __lowerCAmelCase : Dict = self.get_position(positions[smallest_child] ) self.set_position( positions[smallest_child] , self.get_position(positions[start] ) ) self.set_position(positions[start] , _SCREAMING_SNAKE_CASE ) self.top_to_bottom(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Any = position[index] while index != 0: __lowerCAmelCase : str = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 ) if val < heap[parent]: __lowerCAmelCase : Any = heap[parent] __lowerCAmelCase : Tuple = position[parent] self.set_position(position[parent] , _SCREAMING_SNAKE_CASE ) else: __lowerCAmelCase : str = val __lowerCAmelCase : Dict = temp self.set_position(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) break __lowerCAmelCase : str = parent else: __lowerCAmelCase : List[Any] = val __lowerCAmelCase : int = temp self.set_position(_SCREAMING_SNAKE_CASE , 0 ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Union[str, Any] = len(_SCREAMING_SNAKE_CASE ) // 2 - 1 for i in range(_SCREAMING_SNAKE_CASE , -1 , -1 ): self.top_to_bottom(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ): __lowerCAmelCase : Tuple = positions[0] __lowerCAmelCase : Optional[int] = sys.maxsize self.top_to_bottom(_SCREAMING_SNAKE_CASE , 0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) return temp def __lowerCAmelCase (_UpperCamelCase ): __lowerCAmelCase : int = Heap() __lowerCAmelCase : str = [0] * len(_UpperCamelCase ) __lowerCAmelCase : int = [-1] * len(_UpperCamelCase ) # Neighboring Tree Vertex of selected vertex # Minimum Distance of explored vertex with neighboring vertex of partial tree # formed in graph __lowerCAmelCase : List[str] = [] # Heap of Distance of vertices from their neighboring vertex __lowerCAmelCase : Tuple = [] for vertex in range(len(_UpperCamelCase ) ): distance_tv.append(sys.maxsize ) positions.append(_UpperCamelCase ) heap.node_position.append(_UpperCamelCase ) __lowerCAmelCase : Any = [] __lowerCAmelCase : List[str] = 1 __lowerCAmelCase : Optional[int] = sys.maxsize for neighbor, distance in adjacency_list[0]: __lowerCAmelCase : int = 0 __lowerCAmelCase : List[Any] = distance heap.heapify(_UpperCamelCase , _UpperCamelCase ) for _ 
in range(1 , len(_UpperCamelCase ) ): __lowerCAmelCase : str = heap.delete_minimum(_UpperCamelCase , _UpperCamelCase ) if visited[vertex] == 0: tree_edges.append((nbr_tv[vertex], vertex) ) __lowerCAmelCase : int = 1 for neighbor, distance in adjacency_list[vertex]: if ( visited[neighbor] == 0 and distance < distance_tv[heap.get_position(_UpperCamelCase )] ): __lowerCAmelCase : Tuple = distance heap.bottom_to_top( _UpperCamelCase , heap.get_position(_UpperCamelCase ) , _UpperCamelCase , _UpperCamelCase ) __lowerCAmelCase : str = vertex return tree_edges if __name__ == "__main__": # pragma: no cover # < --------- Prims Algorithm --------- > lowerCamelCase__ = int(input("""Enter number of edges: """).strip()) lowerCamelCase__ = defaultdict(list) for _ in range(edges_number): lowerCamelCase__ = [int(x) for x in input().strip().split()] adjacency_list[edge[0]].append([edge[1], edge[2]]) adjacency_list[edge[1]].append([edge[0], edge[2]]) print(prisms_algorithm(adjacency_list))
86
import unittest

from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow


if is_flax_available():
    import jax.numpy as jnp

    from transformers import FlaxXLMRobertaModel


@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    '''simple docstring'''

    @slow
    def A ( self : int ):
        '''simple docstring'''
        _snake_case = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
        _snake_case = AutoTokenizer.from_pretrained('xlm-roberta-base' )
        _snake_case = 'The dog is cute and lives in the garden house'
        _snake_case = jnp.array([tokenizer.encode(lowercase )] )
        _snake_case = (1, 12, 768)  # batch_size, sequence_length, embedding_vector_dim
        _snake_case = jnp.array(
            [[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
        _snake_case = model(lowercase )['last_hidden_state']
        self.assertEqual(output.shape , lowercase )
        # compare the actual values for a slice of last dim
        self.assertTrue(jnp.allclose(output[:, :, -1] , lowercase , atol=1E-3 ) )
282
0
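The Heap-based program above implements Prim's minimum-spanning-tree algorithm over an adjacency list of [neighbor, distance] pairs. A compact, independent sketch of the same algorithm using the standard-library heapq (not the custom Heap class above; names are illustrative):

import heapq
from collections import defaultdict

def prim(adjacency, start=0):
    # grow the tree from `start`; the heap always holds the lightest crossing edge
    visited = {start}
    tree_edges = []
    heap = [(weight, start, neighbor) for neighbor, weight in adjacency[start]]
    heapq.heapify(heap)
    while heap:
        weight, u, v = heapq.heappop(heap)
        if v in visited:
            continue
        visited.add(v)
        tree_edges.append((u, v))
        for neighbor, next_weight in adjacency[v]:
            if neighbor not in visited:
                heapq.heappush(heap, (next_weight, v, neighbor))
    return tree_edges

graph = defaultdict(list)
for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
    graph[u].append([v, w])
    graph[v].append([u, w])
print(prim(graph))  # [(0, 1), (1, 2)] -- the 3-cycle drops its heaviest edge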
import argparse

import torch

from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging


logging.set_verbosity_info()


def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
    # Initialise PyTorch model
    lowercase :Optional[int] = LxmertConfig.from_json_file(lowerCamelCase )
    print(F"Building PyTorch model from configuration: {config}" )
    lowercase :Any = LxmertForPreTraining(lowerCamelCase )

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(lowerCamelCase, lowerCamelCase, lowerCamelCase )

    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    torch.save(model.state_dict(), lowerCamelCase )


if __name__ == "__main__":
    _UpperCAmelCase : Dict = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    _UpperCAmelCase : Any = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
158
import math import os import re import sys import unittest from pathlib import Path from typing import Tuple from unittest.mock import patch from parameterized import parameterized from transformers.testing_utils import ( CaptureStderr, ExtendSysPath, TestCasePlus, execute_subprocess_async, get_gpu_count, get_torch_dist_unique_port, require_apex, require_bitsandbytes, require_fairscale, require_torch, require_torch_gpu, require_torch_multi_gpu, require_torch_non_multi_gpu, slow, ) from transformers.trainer_callback import TrainerState from transformers.trainer_utils import set_seed _UpperCAmelCase : Optional[Any] = os.path.abspath(os.path.dirname(__file__)) with ExtendSysPath(f'''{bindir}/../../examples/pytorch/translation'''): from run_translation import main # noqa set_seed(42) _UpperCAmelCase : Union[str, Any] = "sshleifer/student_marian_en_ro_6_1" _UpperCAmelCase : Any = "sshleifer/tiny-mbart" @require_torch class __lowerCAmelCase ( lowerCAmelCase): def SCREAMING_SNAKE_CASE ( self: int , _lowerCAmelCase: int=False , _lowerCAmelCase: str=None , _lowerCAmelCase: Dict=True , _lowerCAmelCase: Dict=True , _lowerCAmelCase: Optional[int]=True , _lowerCAmelCase: Union[str, Any]=True , ): lowercase :Any = self.run_trainer( eval_steps=1 , max_len=12 , model_name=_lowerCAmelCase , num_train_epochs=1 , distributed=_lowerCAmelCase , extra_args_str=_lowerCAmelCase , predict_with_generate=_lowerCAmelCase , do_train=_lowerCAmelCase , do_eval=_lowerCAmelCase , do_predict=_lowerCAmelCase , ) lowercase :List[Any] = TrainerState.load_from_json(os.path.join(_lowerCAmelCase , "trainer_state.json" ) ).log_history if not do_eval: return lowercase :Union[str, Any] = [log for log in logs if "eval_loss" in log.keys()] lowercase :Any = eval_metrics[0] if predict_with_generate: assert "eval_bleu" in first_step_stats lowercase :Optional[Any] = eval_metrics[-1] assert isinstance(last_step_stats["eval_bleu"] , _lowerCAmelCase ) assert not math.isnan(float(last_step_stats["eval_loss"] ) ), "eval_loss must not be `nan`" @require_torch_non_multi_gpu def SCREAMING_SNAKE_CASE ( self: List[Any] ): self.run_seqaseq_quick() @require_torch_multi_gpu def SCREAMING_SNAKE_CASE ( self: str ): self.run_seqaseq_quick(distributed=_lowerCAmelCase ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE ( self: Tuple ): self.run_seqaseq_quick(distributed=_lowerCAmelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE ( self: Optional[Any] ): self.run_seqaseq_quick(distributed=_lowerCAmelCase , extra_args_str="--sharded_ddp simple" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE ( self: Optional[int] ): self.run_seqaseq_quick(distributed=_lowerCAmelCase , extra_args_str="--sharded_ddp simple --fp16" ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE ( self: Dict ): self.run_seqaseq_quick(distributed=_lowerCAmelCase , extra_args_str="--sharded_ddp zero_dp_2" , predict_with_generate=_lowerCAmelCase ) @unittest.skip("Requires an update of the env running those tests" ) @require_torch_multi_gpu @require_fairscale def SCREAMING_SNAKE_CASE ( self: Optional[Any] ): self.run_seqaseq_quick( distributed=_lowerCAmelCase , extra_args_str="--sharded_ddp zero_dp_2 --fp16" , predict_with_generate=_lowerCAmelCase ) @require_apex @require_torch_gpu def SCREAMING_SNAKE_CASE ( self: 
List[Any] ): # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same # program and it breaks other tests that run from the same pytest worker, therefore until this is # sorted out it must be run only in an external program, that is distributed=True in this # test and only under one or more gpus - if we want cpu will need to make a special test # # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via # 2nd main() call it botches the future eval. # self.run_seqaseq_quick(distributed=_lowerCAmelCase , extra_args_str="--fp16 --fp16_backend=apex" ) # test 2nd time - was getting eval_loss': nan' # to reproduce the problem set distributed=False self.run_seqaseq_quick(distributed=_lowerCAmelCase , extra_args_str="--fp16 --fp16_backend=apex" ) @parameterized.expand(["base", "low", "high", "mixed"] ) @require_torch_multi_gpu def SCREAMING_SNAKE_CASE ( self: Union[str, Any] , _lowerCAmelCase: Any ): # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout lowercase :List[Any] = { # test with the default log_level - should be info and thus log info once "base": {"extra_args_str": "", "n_matches": 1}, # test with low log_level and log_level_replica - should be noisy on all processes # now the info string should appear twice on 2 processes "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2}, # test with high log_level and low log_level_replica # now the info string should appear once only on the replica "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1}, # test with high log_level and log_level_replica - should be quiet on all processes "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0}, } lowercase :str = experiments[experiment_id] lowercase :Dict = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False} lowercase :List[str] = "Running training" with CaptureStderr() as cl: self.run_seqaseq_quick(**_lowerCAmelCase , extra_args_str=data["extra_args_str"] ) lowercase :Dict = len(re.findall(_lowerCAmelCase , cl.err ) ) self.assertEqual(_lowerCAmelCase , data["n_matches"] ) @slow def SCREAMING_SNAKE_CASE ( self: List[str] ): lowercase :Dict = self.run_trainer( eval_steps=2 , max_len=1_28 , model_name=_lowerCAmelCase , learning_rate=3e-4 , num_train_epochs=10 , distributed=_lowerCAmelCase , ) # Check metrics lowercase :List[str] = TrainerState.load_from_json(os.path.join(_lowerCAmelCase , "trainer_state.json" ) ).log_history lowercase :Dict = [log for log in logs if "eval_loss" in log.keys()] lowercase :str = eval_metrics[0] lowercase :Optional[int] = eval_metrics[-1] assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing" assert isinstance(last_step_stats["eval_bleu"] , _lowerCAmelCase ) # test if do_predict saves generations and metrics lowercase :Optional[Any] = os.listdir(_lowerCAmelCase ) lowercase :List[str] = {os.path.basename(_lowerCAmelCase ) for p in contents} assert "generated_predictions.txt" in contents assert "predict_results.json" in contents @slow @require_bitsandbytes def SCREAMING_SNAKE_CASE ( self: Tuple ): from transformers.training_args import OptimizerNames def train_and_return_metrics(_lowerCAmelCase: str ) -> Tuple[int, float]: lowercase :Tuple = "--skip_memory_metrics 0" lowercase :List[str] = self.run_trainer( max_len=1_28 , model_name=_lowerCAmelCase , learning_rate=3e-4 , num_train_epochs=1 , 
optim=_lowerCAmelCase , distributed=_lowerCAmelCase , extra_args_str=_lowerCAmelCase , do_eval=_lowerCAmelCase , do_predict=_lowerCAmelCase , n_gpus_to_use=1 , ) # Check metrics lowercase :List[str] = TrainerState.load_from_json(Path(_lowerCAmelCase , "trainer_state.json" ) ).log_history lowercase :Dict = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20 ) lowercase :Any = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20 ) lowercase :List[str] = logs[0]["train_loss"] return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss lowercase , lowercase , lowercase :Optional[Any] = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value ) lowercase , lowercase , lowercase :List[str] = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value ) lowercase :List[Any] = gpu_alloc_mem_orig - gpu_alloc_mem_bnb lowercase :List[str] = gpu_peak_mem_orig + gpu_alloc_mem_orig lowercase :List[str] = gpu_peak_mem_bnb + gpu_alloc_mem_bnb lowercase :Tuple = gpu_total_mem_orig - gpu_total_mem_bnb # sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized # in 2 bytes and the diff in optim memory usage is derived as so: # # - normal 25*8=~200MB (8 bytes per param) # - bnb 25*2= ~50MB (2 bytes per param) # # Thus we should expect ~150MB total memory saved. # # Peak memory should be the same - the total should be different by about that same margin # # After leaving a small margin to accommodate for differences between gpus let's check # that we have at least 120MB in savings lowercase :Union[str, Any] = 1_20 # uncomment the following if this test starts failing - requires py38 for a new print feature # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB") # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB") # print(f"{gpu_alloc_mem_diff=}MB") # print(f"{gpu_peak_mem_diff=}MB") # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB") # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB") self.assertGreater( _lowerCAmelCase , _lowerCAmelCase , "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got" F" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and" F" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB" , ) self.assertGreater( _lowerCAmelCase , _lowerCAmelCase , "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got" F" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and" F" gpu_total_mem_bnb={gpu_total_mem_bnb}MB" , ) self.assertEqual( _lowerCAmelCase , _lowerCAmelCase , F"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}" ) def SCREAMING_SNAKE_CASE ( self: Dict , _lowerCAmelCase: int , _lowerCAmelCase: str , _lowerCAmelCase: int , _lowerCAmelCase: float = 3e-3 , _lowerCAmelCase: str = "adafactor" , _lowerCAmelCase: bool = False , _lowerCAmelCase: str = None , _lowerCAmelCase: int = 0 , _lowerCAmelCase: bool = True , _lowerCAmelCase: bool = True , _lowerCAmelCase: bool = True , _lowerCAmelCase: bool = True , _lowerCAmelCase: int = None , ): lowercase :Optional[int] = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro" lowercase :Optional[Any] = self.get_auto_remove_tmp_dir() lowercase :Tuple = F"\n --model_name_or_path {model_name}\n --train_file 
{data_dir}/train.json\n --validation_file {data_dir}/val.json\n --test_file {data_dir}/test.json\n --output_dir {output_dir}\n --overwrite_output_dir\n --max_train_samples 8\n --max_source_length {max_len}\n --max_target_length {max_len}\n --do_train\n --num_train_epochs {str(_lowerCAmelCase )}\n --per_device_train_batch_size 4\n --learning_rate {learning_rate}\n --warmup_steps 8\n --logging_steps 0\n --logging_strategy no\n --save_steps {str(_lowerCAmelCase )}\n --group_by_length\n --label_smoothing_factor 0.1\n --target_lang ro_RO\n --source_lang en_XX\n ".split() lowercase :Union[str, Any] = F"\n --do_eval\n --per_device_eval_batch_size 4\n --max_eval_samples 8\n --val_max_target_length {max_len}\n --evaluation_strategy steps\n --eval_steps {str(_lowerCAmelCase )}\n ".split() lowercase :str = "\n --do_predict\n ".split() lowercase :Union[str, Any] = [] if do_train: args += args_train if do_eval: args += args_eval if do_predict: args += args_predict if predict_with_generate: args += "--predict_with_generate".split() if do_train: if optim == "adafactor": args += "--adafactor".split() else: args += F"--optim {optim}".split() if extra_args_str is not None: args += extra_args_str.split() if distributed: if n_gpus_to_use is None: lowercase :Optional[int] = get_gpu_count() lowercase :str = get_torch_dist_unique_port() lowercase :Union[str, Any] = F"\n -m torch.distributed.run\n --nproc_per_node={n_gpus_to_use}\n --master_port={master_port}\n {self.examples_dir_str}/pytorch/translation/run_translation.py\n ".split() lowercase :Optional[int] = [sys.executable] + distributed_args + args # keep for quick debug # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(_lowerCAmelCase , env=self.get_env() ) else: lowercase :Tuple = ["run_translation.py"] + args with patch.object(_lowerCAmelCase , "argv" , _lowerCAmelCase ): main() return output_dir
158
1
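The trainer tests above repeatedly reload trainer_state.json and filter its log_history for the entries that carry evaluation metrics, then compare the first and last of them. A toy version of that filtering and improvement check, with illustrative data rather than real trainer output:

logs = [
    {"step": 1, "loss": 2.0},
    {"step": 2, "eval_loss": 1.5, "eval_bleu": 10.0},
    {"step": 4, "eval_loss": 1.2, "eval_bleu": 12.5},
]
eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
first_step_stats, last_step_stats = eval_metrics[0], eval_metrics[-1]
assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"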
'''simple docstring'''

import unittest

import numpy as np

from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class __A ( A_ , unittest.TestCase ):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class __A ( unittest.TestCase ):

    @property
    def _lowercase (self : int ):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def _lowercase (self : str ):
        UpperCAmelCase_ = ort.SessionOptions()
        UpperCAmelCase_ = False
        return options

    def _lowercase (self : List[Any] ):
        UpperCAmelCase_ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png" )
        UpperCAmelCase_ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
        UpperCAmelCase_ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting" , revision="onnx" , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=_lowerCamelCase )

        UpperCAmelCase_ = "A red cat sitting on a park bench"

        UpperCAmelCase_ = np.random.RandomState(0 )
        UpperCAmelCase_ = pipe(
            prompt=_lowerCamelCase , image=_lowerCamelCase , mask_image=_lowerCamelCase , guidance_scale=7.5 , num_inference_steps=10 , generator=_lowerCamelCase , output_type="np" , )
        UpperCAmelCase_ = output.images
        UpperCAmelCase_ = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        UpperCAmelCase_ = np.array([0.25_14, 0.30_07, 0.35_17, 0.17_90, 0.23_82, 0.31_67, 0.19_44, 0.22_73, 0.24_64] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3

    def _lowercase (self : int ):
        UpperCAmelCase_ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png" )
        UpperCAmelCase_ = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png" )
        UpperCAmelCase_ = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting" , subfolder="scheduler" , revision="onnx" )
        UpperCAmelCase_ = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting" , revision="onnx" , scheduler=_lowerCamelCase , safety_checker=_lowerCamelCase , feature_extractor=_lowerCamelCase , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=_lowerCamelCase )

        UpperCAmelCase_ = "A red cat sitting on a park bench"

        UpperCAmelCase_ = np.random.RandomState(0 )
        UpperCAmelCase_ = pipe(
            prompt=_lowerCamelCase , image=_lowerCamelCase , mask_image=_lowerCamelCase , guidance_scale=7.5 , num_inference_steps=20 , generator=_lowerCamelCase , output_type="np" , )
        UpperCAmelCase_ = output.images
        UpperCAmelCase_ = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        UpperCAmelCase_ = np.array([0.00_86, 0.00_77, 0.00_83, 0.00_93, 0.01_07, 0.01_39, 0.00_94, 0.00_97, 0.01_25] )

        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
1
"""simple docstring""" UpperCAmelCase__ = { 'Pillow': 'Pillow', 'accelerate': 'accelerate>=0.11.0', 'compel': 'compel==0.1.8', 'black': 'black~=23.1', 'datasets': 'datasets', 'filelock': 'filelock', 'flax': 'flax>=0.4.1', 'hf-doc-builder': 'hf-doc-builder>=0.3.0', 'huggingface-hub': 'huggingface-hub>=0.13.2', 'requests-mock': 'requests-mock==1.10.0', 'importlib_metadata': 'importlib_metadata', 'invisible-watermark': 'invisible-watermark', 'isort': 'isort>=5.5.4', 'jax': 'jax>=0.2.8,!=0.3.2', 'jaxlib': 'jaxlib>=0.1.65', 'Jinja2': 'Jinja2', 'k-diffusion': 'k-diffusion>=0.0.12', 'torchsde': 'torchsde', 'note_seq': 'note_seq', 'librosa': 'librosa', 'numpy': 'numpy', 'omegaconf': 'omegaconf', 'parameterized': 'parameterized', 'protobuf': 'protobuf>=3.20.3,<4', 'pytest': 'pytest', 'pytest-timeout': 'pytest-timeout', 'pytest-xdist': 'pytest-xdist', 'ruff': 'ruff>=0.0.241', 'safetensors': 'safetensors', 'sentencepiece': 'sentencepiece>=0.1.91,!=0.1.92', 'scipy': 'scipy', 'onnx': 'onnx', 'regex': 'regex!=2019.12.17', 'requests': 'requests', 'tensorboard': 'tensorboard', 'torch': 'torch>=1.4', 'torchvision': 'torchvision', 'transformers': 'transformers>=4.25.1', 'urllib3': 'urllib3<=2.0.0', }
288
0
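The dependency table in the row above maps a bare package name to its pinned requirement string; setup tooling then looks entries up by name when assembling install requirements. A toy version of that lookup, with an abridged table and an illustrative helper name:

deps = {
    "numpy": "numpy",
    "torch": "torch>=1.4",
    "transformers": "transformers>=4.25.1",
}

def require(*names):
    # resolve bare package names to their pinned requirement strings
    return [deps[name] for name in names]

print(require("torch", "transformers"))  # ['torch>=1.4', 'transformers>=4.25.1']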
__A = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last released version, comment out the command above and
# uncomment the following command.
# ! pip install git+https://github.com/huggingface/transformers.git
'''

__A = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
__A = {
    '''{processor_class}''': '''FakeProcessorClass''',
    '''{model_class}''': '''FakeModelClass''',
    '''{object_class}''': '''FakeObjectClass''',
}
277
import shutil import tempfile import unittest import numpy as np import pytest from transformers.testing_utils import require_vision from transformers.utils import is_vision_available if is_vision_available(): from PIL import Image from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast @require_vision class lowercase ( unittest.TestCase): """simple docstring""" def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]: UpperCAmelCase_= tempfile.mkdtemp() UpperCAmelCase_= BlipImageProcessor() UpperCAmelCase_= GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" ) UpperCAmelCase_= BlipaProcessor(__UpperCAmelCase , __UpperCAmelCase ) processor.save_pretrained(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self : List[Any] , **__UpperCAmelCase : Union[str, Any] ) -> int: return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).tokenizer def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , **__UpperCAmelCase : str ) -> Optional[int]: return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).image_processor def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]: shutil.rmtree(self.tmpdirname ) def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any: UpperCAmelCase_= [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] UpperCAmelCase_= [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict: UpperCAmelCase_= BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase_= self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" ) UpperCAmelCase_= self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 ) UpperCAmelCase_= BlipaProcessor.from_pretrained( self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , __UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , __UpperCAmelCase ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]: UpperCAmelCase_= self.get_image_processor() UpperCAmelCase_= self.get_tokenizer() UpperCAmelCase_= BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) UpperCAmelCase_= self.prepare_image_inputs() UpperCAmelCase_= image_processor(__UpperCAmelCase , return_tensors="""np""" ) UpperCAmelCase_= processor(images=__UpperCAmelCase , return_tensors="""np""" ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 ) def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any: UpperCAmelCase_= self.get_image_processor() UpperCAmelCase_= self.get_tokenizer() UpperCAmelCase_= BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) UpperCAmelCase_= """lower newer""" UpperCAmelCase_= processor(text=__UpperCAmelCase ) UpperCAmelCase_= tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict: UpperCAmelCase_= 
self.get_image_processor() UpperCAmelCase_= self.get_tokenizer() UpperCAmelCase_= BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) UpperCAmelCase_= """lower newer""" UpperCAmelCase_= self.prepare_image_inputs() UpperCAmelCase_= processor(text=__UpperCAmelCase , images=__UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] ) # test if it raises when no input is passed with pytest.raises(__UpperCAmelCase ): processor() def _SCREAMING_SNAKE_CASE ( self : str ) -> Any: UpperCAmelCase_= self.get_image_processor() UpperCAmelCase_= self.get_tokenizer() UpperCAmelCase_= BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) UpperCAmelCase_= [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCAmelCase_= processor.batch_decode(__UpperCAmelCase ) UpperCAmelCase_= tokenizer.batch_decode(__UpperCAmelCase ) self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase ) def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int: UpperCAmelCase_= self.get_image_processor() UpperCAmelCase_= self.get_tokenizer() UpperCAmelCase_= BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase ) UpperCAmelCase_= """lower newer""" UpperCAmelCase_= self.prepare_image_inputs() UpperCAmelCase_= processor(text=__UpperCAmelCase , images=__UpperCAmelCase ) # For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask'] self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
277
1
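The processor tests above exercise the usual composite-processor pattern: one object wraps a tokenizer and an image processor, requires at least one of text/images, and merges their outputs into a single dict. A simplified stand-in showing that dispatch (not the real Blip2Processor surface; the stub callables below are purely illustrative):

class ToyProcessor:
    """Wraps a tokenizer and an image processor and merges their outputs."""

    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        outputs = {}
        if images is not None:
            outputs["pixel_values"] = self.image_processor(images)
        if text is not None:
            outputs.update(self.tokenizer(text))
        return outputs

proc = ToyProcessor(lambda t: {"input_ids": [[1, 2]]}, lambda im: [[0.0]])
print(sorted(proc(text="lower newer", images=["img"]).keys()))  # ['input_ids', 'pixel_values']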
"""simple docstring""" import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def _SCREAMING_SNAKE_CASE (__lowerCAmelCase ) -> float: '''simple docstring''' return np.dot(__lowerCAmelCase , __lowerCAmelCase ) class SCREAMING_SNAKE_CASE__ : def __init__( self : str , *, lowerCAmelCase_ : float = np.inf , lowerCAmelCase_ : str = "linear" , lowerCAmelCase_ : float = 0.0 , ): """simple docstring""" lowercase_ = regularization lowercase_ = gamma if kernel == "linear": lowercase_ = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError("""rbf kernel requires gamma""") if not isinstance(self.gamma , (float, int)): raise ValueError("""gamma must be float or int""") if not self.gamma > 0: raise ValueError("""gamma must be > 0""") lowercase_ = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: lowercase_ = F'''Unknown kernel: {kernel}''' raise ValueError(lowerCAmelCase_) def _UpperCAmelCase ( self : int , lowerCAmelCase_ : ndarray , lowerCAmelCase_ : ndarray): """simple docstring""" return np.dot(lowerCAmelCase_ , lowerCAmelCase_) def _UpperCAmelCase ( self : Dict , lowerCAmelCase_ : ndarray , lowerCAmelCase_ : ndarray): """simple docstring""" return np.exp(-(self.gamma * norm_squared(vectora - vectora))) def _UpperCAmelCase ( self : int , lowerCAmelCase_ : list[ndarray] , lowerCAmelCase_ : ndarray): """simple docstring""" lowercase_ = observations lowercase_ = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((lowercase_) , ) = np.shape(lowerCAmelCase_) def to_minimize(lowerCAmelCase_ : ndarray) -> float: lowercase_ = 0 ((lowercase_) , ) = np.shape(lowerCAmelCase_) for i in range(lowerCAmelCase_): for j in range(lowerCAmelCase_): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j]) ) return 1 / 2 * s - sum(lowerCAmelCase_) lowercase_ = LinearConstraint(lowerCAmelCase_ , 0 , 0) lowercase_ = Bounds(0 , self.regularization) lowercase_ = minimize( lowerCAmelCase_ , np.ones(lowerCAmelCase_) , bounds=lowerCAmelCase_ , constraints=[ly_contraint]).x lowercase_ = l_star # calculating mean offset of separation plane to points lowercase_ = 0 for i in range(lowerCAmelCase_): for j in range(lowerCAmelCase_): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j]) lowercase_ = s / n def _UpperCAmelCase ( self : Any , lowerCAmelCase_ : ndarray): """simple docstring""" lowercase_ = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , lowerCAmelCase_) for n in range(len(self.classes))) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
136
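The classifier above selects between a linear kernel, np.dot(x, y), and an RBF kernel, exp(-gamma * ||x - y||^2). The RBF case written out directly, with a numeric check (the function name is illustrative):

import numpy as np

def rbf_kernel(vector_a, vector_b, gamma=1.0):
    # k(x, y) = exp(-gamma * ||x - y||^2), the "rbf" branch above
    difference = np.asarray(vector_a) - np.asarray(vector_b)
    return np.exp(-gamma * np.dot(difference, difference))

print(rbf_kernel([0.0, 0.0], [0.0, 0.0]))             # 1.0 for identical points
print(rbf_kernel([0.0, 0.0], [3.0, 4.0], gamma=0.1))  # exp(-2.5) ~ 0.0821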
"""simple docstring""" from typing import Dict, Optional import numpy as np import datasets UpperCAmelCase : Tuple = "\nIoU is the area of overlap between the predicted segmentation and the ground truth divided by the area of union\nbetween the predicted segmentation and the ground truth. For binary (two classes) or multi-class segmentation,\nthe mean IoU of the image is calculated by taking the IoU of each class and averaging them.\n" UpperCAmelCase : Optional[int] = "\nArgs:\n predictions (`List[ndarray]`):\n List of predicted segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n references (`List[ndarray]`):\n List of ground truth segmentation maps, each of shape (height, width). Each segmentation map can be of a different size.\n num_labels (`int`):\n Number of classes (categories).\n ignore_index (`int`):\n Index that will be ignored during evaluation.\n nan_to_num (`int`, *optional*):\n If specified, NaN values will be replaced by the number defined by the user.\n label_map (`dict`, *optional*):\n If specified, dictionary mapping old label indices to new label indices.\n reduce_labels (`bool`, *optional*, defaults to `False`):\n Whether or not to reduce all label values of segmentation maps by 1. Usually used for datasets where 0 is used for background,\n and background itself is not included in all classes of a dataset (e.g. ADE20k). The background label will be replaced by 255.\n\nReturns:\n `Dict[str, float | ndarray]` comprising various elements:\n - *mean_iou* (`float`):\n Mean Intersection-over-Union (IoU averaged over all categories).\n - *mean_accuracy* (`float`):\n Mean accuracy (averaged over all categories).\n - *overall_accuracy* (`float`):\n Overall accuracy on all images.\n - *per_category_accuracy* (`ndarray` of shape `(num_labels,)`):\n Per category accuracy.\n - *per_category_iou* (`ndarray` of shape `(num_labels,)`):\n Per category IoU.\n\nExamples:\n\n >>> import numpy as np\n\n >>> mean_iou = datasets.load_metric(\"mean_iou\")\n\n >>> # suppose one has 3 different segmentation maps predicted\n >>> predicted_1 = np.array([[1, 2], [3, 4], [5, 255]])\n >>> actual_1 = np.array([[0, 3], [5, 4], [6, 255]])\n\n >>> predicted_2 = np.array([[2, 7], [9, 2], [3, 6]])\n >>> actual_2 = np.array([[1, 7], [9, 2], [3, 6]])\n\n >>> predicted_3 = np.array([[2, 2, 3], [8, 2, 4], [3, 255, 2]])\n >>> actual_3 = np.array([[1, 2, 2], [8, 2, 1], [3, 255, 1]])\n\n >>> predicted = [predicted_1, predicted_2, predicted_3]\n >>> ground_truth = [actual_1, actual_2, actual_3]\n\n >>> results = mean_iou.compute(predictions=predicted, references=ground_truth, num_labels=10, ignore_index=255, reduce_labels=False)\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {'mean_iou': 0.47750000000000004, 'mean_accuracy': 0.5916666666666666, 'overall_accuracy': 0.5263157894736842, 'per_category_iou': array([0. , 0. , 0.375, 0.4 , 0.5 , 0. , 0.5 , 1. , 1. , 1. ]), 'per_category_accuracy': array([0. , 0. , 0.75 , 0.66666667, 1. , 0. , 0.5 , 1. , 1. , 1. 
])}\n" UpperCAmelCase : List[str] = "\\n@software{MMSegmentation_Contributors_OpenMMLab_Semantic_Segmentation_2020,\nauthor = {{MMSegmentation Contributors}},\nlicense = {Apache-2.0},\nmonth = {7},\ntitle = {{OpenMMLab Semantic Segmentation Toolbox and Benchmark}},\nurl = {https://github.com/open-mmlab/mmsegmentation},\nyear = {2020}\n}" def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False , ) -> Dict: '''simple docstring''' if label_map is not None: for old_id, new_id in label_map.items(): lowercase_ = new_id # turn into Numpy arrays lowercase_ = np.array(__lowerCAmelCase ) lowercase_ = np.array(__lowerCAmelCase ) if reduce_labels: lowercase_ = 2_55 lowercase_ = label - 1 lowercase_ = 2_55 lowercase_ = label != ignore_index lowercase_ = np.not_equal(__lowerCAmelCase , __lowerCAmelCase ) lowercase_ = pred_label[mask] lowercase_ = np.array(__lowerCAmelCase )[mask] lowercase_ = pred_label[pred_label == label] lowercase_ = np.histogram(__lowerCAmelCase , bins=__lowerCAmelCase , range=(0, num_labels - 1) )[0] lowercase_ = np.histogram(__lowerCAmelCase , bins=__lowerCAmelCase , range=(0, num_labels - 1) )[0] lowercase_ = np.histogram(__lowerCAmelCase , bins=__lowerCAmelCase , range=(0, num_labels - 1) )[0] lowercase_ = area_pred_label + area_label - area_intersect return area_intersect, area_union, area_pred_label, area_label def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = False , ) -> Optional[Any]: '''simple docstring''' lowercase_ = np.zeros((num_labels,) , dtype=np.floataa ) lowercase_ = np.zeros((num_labels,) , dtype=np.floataa ) lowercase_ = np.zeros((num_labels,) , dtype=np.floataa ) lowercase_ = np.zeros((num_labels,) , dtype=np.floataa ) for result, gt_seg_map in zip(__lowerCAmelCase , __lowerCAmelCase ): lowercase_ , lowercase_ , lowercase_ , lowercase_ = intersect_and_union( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) total_area_intersect += area_intersect total_area_union += area_union total_area_pred_label += area_pred_label total_area_label += area_label return total_area_intersect, total_area_union, total_area_pred_label, total_area_label def _SCREAMING_SNAKE_CASE (__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = None , __lowerCAmelCase = None , __lowerCAmelCase = False , ) -> Any: '''simple docstring''' lowercase_ , lowercase_ , lowercase_ , lowercase_ = total_intersect_and_union( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase ) # compute metrics lowercase_ = {} lowercase_ = total_area_intersect.sum() / total_area_label.sum() lowercase_ = total_area_intersect / total_area_union lowercase_ = total_area_intersect / total_area_label lowercase_ = np.nanmean(__lowerCAmelCase ) lowercase_ = np.nanmean(__lowerCAmelCase ) lowercase_ = all_acc lowercase_ = iou lowercase_ = acc if nan_to_num is not None: lowercase_ = {metric: np.nan_to_num(__lowerCAmelCase , nan=__lowerCAmelCase ) for metric, metric_value in metrics.items()} return metrics @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class SCREAMING_SNAKE_CASE__ ( datasets.Metric ): def _UpperCAmelCase ( self : Union[str, Any]): """simple docstring""" return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION 
, inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( # 1st Seq - height dim, 2nd - width dim { """predictions""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16"""))), """references""": datasets.Sequence(datasets.Sequence(datasets.Value("""uint16"""))), }) , reference_urls=[ """https://github.com/open-mmlab/mmsegmentation/blob/71c201b1813267d78764f306a297ca717827c4bf/mmseg/core/evaluation/metrics.py""" ] , ) def _UpperCAmelCase ( self : Tuple , lowerCAmelCase_ : Any , lowerCAmelCase_ : Dict , lowerCAmelCase_ : int , lowerCAmelCase_ : bool , lowerCAmelCase_ : Optional[int] = None , lowerCAmelCase_ : Optional[Dict[int, int]] = None , lowerCAmelCase_ : bool = False , ): """simple docstring""" lowercase_ = mean_iou( results=lowerCAmelCase_ , gt_seg_maps=lowerCAmelCase_ , num_labels=lowerCAmelCase_ , ignore_index=lowerCAmelCase_ , nan_to_num=lowerCAmelCase_ , label_map=lowerCAmelCase_ , reduce_labels=lowerCAmelCase_ , ) return iou_result
136
1
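A minimal usage sketch for the support-vector classifier sample above. The class and method names (SVC, fit, predict) follow the de-obfuscated form of that sample, and the toy data is purely illustrative:

import numpy as np

# two linearly separable observations, labels in {-1, +1}
observations = [np.array([1.0, 1.0]), np.array([-1.0, -1.0])]
classes = np.array([1, -1])

svc = SVC(kernel="linear", regularization=10.0)
svc.fit(observations, classes)
print(svc.predict(np.array([2.0, 2.0])))    # expected: 1
print(svc.predict(np.array([-2.0, -2.0])))  # expected: -1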
"""simple docstring""" import logging import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.models.bert.modeling_bert import ( BERT_INPUTS_DOCSTRING, BERT_START_DOCSTRING, BertEncoder, BertModel, BertPreTrainedModel, ) _A = logging.getLogger(__name__) class _lowercase ( __UpperCAmelCase ): def _UpperCamelCase ( self , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_=None , UpperCAmelCase_=None ) -> Optional[int]: lowerCamelCase : Any = self.layer[current_layer](UpperCAmelCase_ , UpperCAmelCase_ , head_mask[current_layer] ) lowerCamelCase : int = layer_outputs[0] return hidden_states @add_start_docstrings( 'The bare Bert Model transformer with PABEE outputting raw hidden-states without any specific head on top.' , __UpperCAmelCase , ) class _lowercase ( __UpperCAmelCase ): def __init__( self , UpperCAmelCase_ ) -> List[Any]: super().__init__(UpperCAmelCase_ ) lowerCamelCase : Tuple = BertEncoderWithPabee(UpperCAmelCase_ ) self.init_weights() lowerCamelCase : List[Any] = 0 lowerCamelCase : Dict = 0 lowerCamelCase : Any = 0 lowerCamelCase : List[str] = 0 def _UpperCamelCase ( self , UpperCAmelCase_ ) -> Tuple: lowerCamelCase : List[Any] = threshold def _UpperCamelCase ( self , UpperCAmelCase_ ) -> List[str]: lowerCamelCase : Optional[Any] = patience def _UpperCamelCase ( self ) -> Any: lowerCamelCase : str = 0 lowerCamelCase : List[str] = 0 def _UpperCamelCase ( self ) -> List[str]: lowerCamelCase : Tuple = self.inference_layers_num / self.inference_instances_num lowerCamelCase : str = ( F"""*** Patience = {self.patience} Avg. Inference Layers = {avg_inf_layers:.2f} Speed Up =""" F""" {1 - avg_inf_layers / self.config.num_hidden_layers:.2f} ***""" ) print(UpperCAmelCase_ ) @add_start_docstrings_to_model_forward(UpperCAmelCase_ ) def _UpperCamelCase ( self , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=False , ) -> Tuple: if input_ids is not None and inputs_embeds is not None: raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' ) elif input_ids is not None: lowerCamelCase : Union[str, Any] = input_ids.size() elif inputs_embeds is not None: lowerCamelCase : Optional[Any] = inputs_embeds.size()[:-1] else: raise ValueError('You have to specify either input_ids or inputs_embeds' ) lowerCamelCase : Dict = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: lowerCamelCase : Any = torch.ones(UpperCAmelCase_ , device=UpperCAmelCase_ ) if token_type_ids is None: lowerCamelCase : Union[str, Any] = torch.zeros(UpperCAmelCase_ , dtype=torch.long , device=UpperCAmelCase_ ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
lowerCamelCase : torch.Tensor = self.get_extended_attention_mask(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ) # If a 2D ou 3D attention mask is provided for the cross-attention # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder and encoder_hidden_states is not None: lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = encoder_hidden_states.size() lowerCamelCase : int = (encoder_batch_size, encoder_sequence_length) if encoder_attention_mask is None: lowerCamelCase : str = torch.ones(UpperCAmelCase_ , device=UpperCAmelCase_ ) lowerCamelCase : Union[str, Any] = self.invert_attention_mask(UpperCAmelCase_ ) else: lowerCamelCase : Optional[Any] = None # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length] lowerCamelCase : Tuple = self.get_head_mask(UpperCAmelCase_ , self.config.num_hidden_layers ) lowerCamelCase : Tuple = self.embeddings( input_ids=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , inputs_embeds=UpperCAmelCase_ ) lowerCamelCase : Optional[int] = embedding_output if self.training: lowerCamelCase : Tuple = [] for i in range(self.config.num_hidden_layers ): lowerCamelCase : Dict = self.encoder.adaptive_forward( UpperCAmelCase_ , current_layer=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ ) lowerCamelCase : List[str] = self.pooler(UpperCAmelCase_ ) lowerCamelCase : int = output_layers[i](output_dropout(UpperCAmelCase_ ) ) res.append(UpperCAmelCase_ ) elif self.patience == 0: # Use all layers for inference lowerCamelCase : Optional[Any] = self.encoder( UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , encoder_attention_mask=UpperCAmelCase_ , ) lowerCamelCase : Any = self.pooler(encoder_outputs[0] ) lowerCamelCase : str = [output_layers[self.config.num_hidden_layers - 1](UpperCAmelCase_ )] else: lowerCamelCase : Union[str, Any] = 0 lowerCamelCase : Union[str, Any] = None lowerCamelCase : Any = 0 for i in range(self.config.num_hidden_layers ): calculated_layer_num += 1 lowerCamelCase : int = self.encoder.adaptive_forward( UpperCAmelCase_ , current_layer=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , head_mask=UpperCAmelCase_ ) lowerCamelCase : Optional[Any] = self.pooler(UpperCAmelCase_ ) lowerCamelCase : Tuple = output_layers[i](UpperCAmelCase_ ) if regression: lowerCamelCase : List[Any] = logits.detach() if patient_result is not None: lowerCamelCase : str = patient_result.detach() if (patient_result is not None) and torch.abs(patient_result - labels ) < self.regression_threshold: patient_counter += 1 else: lowerCamelCase : Tuple = 0 else: lowerCamelCase : str = logits.detach().argmax(dim=1 ) if patient_result is not None: lowerCamelCase : List[Any] = patient_result.detach().argmax(dim=1 ) if (patient_result is not None) and torch.all(labels.eq(UpperCAmelCase_ ) ): patient_counter += 1 else: lowerCamelCase : Dict = 0 lowerCamelCase : Optional[int] = logits if patient_counter == self.patience: break lowerCamelCase : List[str] = [patient_result] self.inference_layers_num += calculated_layer_num self.inference_instances_num += 1 return res @add_start_docstrings( 'Bert Model transformer with PABEE and a sequence 
classification/regression head on top (a linear layer on top of\n the pooled output) e.g. for GLUE tasks. ' , __UpperCAmelCase , ) class _lowercase ( __UpperCAmelCase ): def __init__( self , UpperCAmelCase_ ) -> Any: super().__init__(UpperCAmelCase_ ) lowerCamelCase : Dict = config.num_labels lowerCamelCase : Optional[Any] = BertModelWithPabee(UpperCAmelCase_ ) lowerCamelCase : Any = nn.Dropout(config.hidden_dropout_prob ) lowerCamelCase : Dict = nn.ModuleList( [nn.Linear(config.hidden_size , self.config.num_labels ) for _ in range(config.num_hidden_layers )] ) self.init_weights() @add_start_docstrings_to_model_forward(UpperCAmelCase_ ) def _UpperCamelCase ( self , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , UpperCAmelCase_=None , ) -> Any: lowerCamelCase : int = self.bert( input_ids=UpperCAmelCase_ , attention_mask=UpperCAmelCase_ , token_type_ids=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , head_mask=UpperCAmelCase_ , inputs_embeds=UpperCAmelCase_ , output_dropout=self.dropout , output_layers=self.classifiers , regression=self.num_labels == 1 , ) lowerCamelCase : Any = (logits[-1],) if labels is not None: lowerCamelCase : str = None lowerCamelCase : Optional[Any] = 0 for ix, logits_item in enumerate(UpperCAmelCase_ ): if self.num_labels == 1: # We are doing regression lowerCamelCase : str = MSELoss() lowerCamelCase : Optional[int] = loss_fct(logits_item.view(-1 ) , labels.view(-1 ) ) else: lowerCamelCase : Union[str, Any] = CrossEntropyLoss() lowerCamelCase : Any = loss_fct(logits_item.view(-1 , self.num_labels ) , labels.view(-1 ) ) if total_loss is None: lowerCamelCase : Optional[Any] = loss else: total_loss += loss * (ix + 1) total_weights += ix + 1 lowerCamelCase : List[Any] = (total_loss / total_weights,) + outputs return outputs
205
"""simple docstring""" import argparse import hashlib import os import urllib import warnings import torch from torch import nn from tqdm import tqdm from transformers import WhisperConfig, WhisperForConditionalGeneration _A = { 'tiny.en': 'https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt', 'tiny': 'https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt', 'base.en': 'https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt', 'base': 'https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt', 'small.en': 'https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt', 'small': 'https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt', 'medium.en': 'https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt', 'medium': 'https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt', 'large': 'https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large.pt', 'large-v2': 'https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt', } def UpperCAmelCase ( a_ ): '''simple docstring''' lowerCamelCase : str = ['layers', 'blocks'] for k in ignore_keys: state_dict.pop(a_, a_ ) _A = { 'blocks': 'layers', 'mlp.0': 'fc1', 'mlp.2': 'fc2', 'mlp_ln': 'final_layer_norm', '.attn.query': '.self_attn.q_proj', '.attn.key': '.self_attn.k_proj', '.attn.value': '.self_attn.v_proj', '.attn_ln': '.self_attn_layer_norm', '.attn.out': '.self_attn.out_proj', '.cross_attn.query': '.encoder_attn.q_proj', '.cross_attn.key': '.encoder_attn.k_proj', '.cross_attn.value': '.encoder_attn.v_proj', '.cross_attn_ln': '.encoder_attn_layer_norm', '.cross_attn.out': '.encoder_attn.out_proj', 'decoder.ln.': 'decoder.layer_norm.', 'encoder.ln.': 'encoder.layer_norm.', 'token_embedding': 'embed_tokens', 'encoder.positional_embedding': 'encoder.embed_positions.weight', 'decoder.positional_embedding': 'decoder.embed_positions.weight', 'ln_post': 'layer_norm', } def UpperCAmelCase ( a_ ): '''simple docstring''' lowerCamelCase : Tuple = list(s_dict.keys() ) for key in keys: lowerCamelCase : List[Any] = key for k, v in WHISPER_MAPPING.items(): if k in key: lowerCamelCase : Optional[int] = new_key.replace(a_, a_ ) print(F"""{key} -> {new_key}""" ) lowerCamelCase : Any = s_dict.pop(a_ ) return s_dict def UpperCAmelCase ( a_ ): '''simple docstring''' lowerCamelCase , lowerCamelCase : int = emb.weight.shape lowerCamelCase : Dict = nn.Linear(a_, a_, bias=a_ ) lowerCamelCase : Union[str, Any] = emb.weight.data return lin_layer def UpperCAmelCase ( a_, a_ ): '''simple docstring''' os.makedirs(a_, exist_ok=a_ ) lowerCamelCase : Union[str, Any] = os.path.basename(a_ ) lowerCamelCase : Any = url.split('/' )[-2] lowerCamelCase : Tuple = os.path.join(a_, a_ ) if os.path.exists(a_ ) and not os.path.isfile(a_ ): raise RuntimeError(F"""{download_target} exists and is not a regular file""" ) if os.path.isfile(a_ ): lowerCamelCase : Union[str, 
Any] = open(a_, 'rb' ).read() if hashlib.shaaaa(a_ ).hexdigest() == expected_shaaaa: return model_bytes else: warnings.warn(F"""{download_target} exists, but the SHA256 checksum does not match; re-downloading the file""" ) with urllib.request.urlopen(a_ ) as source, open(a_, 'wb' ) as output: with tqdm( total=int(source.info().get('Content-Length' ) ), ncols=80, unit='iB', unit_scale=a_, unit_divisor=1024 ) as loop: while True: lowerCamelCase : Union[str, Any] = source.read(8192 ) if not buffer: break output.write(a_ ) loop.update(len(a_ ) ) lowerCamelCase : int = open(a_, 'rb' ).read() if hashlib.shaaaa(a_ ).hexdigest() != expected_shaaaa: raise RuntimeError( 'Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model.' ) return model_bytes def UpperCAmelCase ( a_, a_ ): '''simple docstring''' if ".pt" not in checkpoint_path: lowerCamelCase : str = _download(_MODELS[checkpoint_path] ) else: lowerCamelCase : Any = torch.load(a_, map_location='cpu' ) lowerCamelCase : List[str] = original_checkpoint['dims'] lowerCamelCase : Any = original_checkpoint['model_state_dict'] lowerCamelCase : Tuple = state_dict['decoder.token_embedding.weight'] remove_ignore_keys_(a_ ) rename_keys(a_ ) lowerCamelCase : List[Any] = True lowerCamelCase : str = state_dict['decoder.layers.0.fc1.weight'].shape[0] lowerCamelCase : Optional[int] = WhisperConfig( vocab_size=dimensions['n_vocab'], encoder_ffn_dim=a_, decoder_ffn_dim=a_, num_mel_bins=dimensions['n_mels'], d_model=dimensions['n_audio_state'], max_target_positions=dimensions['n_text_ctx'], encoder_layers=dimensions['n_audio_layer'], encoder_attention_heads=dimensions['n_audio_head'], decoder_layers=dimensions['n_text_layer'], decoder_attention_heads=dimensions['n_text_state'], max_source_positions=dimensions['n_audio_ctx'], ) lowerCamelCase : Union[str, Any] = WhisperForConditionalGeneration(a_ ) lowerCamelCase , lowerCamelCase : Optional[int] = model.model.load_state_dict(a_, strict=a_ ) if len(a_ ) > 0 and not set(a_ ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( 'Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,' F""" but all the following weights are missing {missing}""" ) if tie_embeds: lowerCamelCase : List[Any] = make_linear_from_emb(model.model.decoder.embed_tokens ) else: lowerCamelCase : Tuple = proj_out_weights model.save_pretrained(a_ ) if __name__ == "__main__": _A = argparse.ArgumentParser() # # Required parameters parser.add_argument('--checkpoint_path', type=str, help='Patht to the downloaded checkpoints') parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.') _A = parser.parse_args() convert_openai_whisper_to_tfms(args.checkpoint_path, args.pytorch_dump_folder_path)
205
1
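The PABEE sample above exits inference once `patience` consecutive internal classifiers agree on the prediction. A standalone sketch of that stopping rule (hypothetical names, not the model code):

def pabee_exit_layer(per_layer_predictions: list, patience: int) -> int:
    """Return the 1-based layer at which the patience rule would stop."""
    agreement = 0
    previous = None
    for layer, prediction in enumerate(per_layer_predictions, start=1):
        # a prediction matching the previous layer's increases agreement
        agreement = agreement + 1 if prediction == previous else 0
        previous = prediction
        if agreement == patience:
            return layer
    return len(per_layer_predictions)  # no early exit: use all layers

print(pabee_exit_layer([0, 1, 1, 2, 2, 2], patience=2))  # 6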
'''simple docstring'''
import tempfile
import unittest

from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch

TINY_BART = "sshleifer/bart-tiny-random"
TINY_T5 = "patrickvonplaten/t5-tiny-random"


@require_torch
class MakeStudentTester(unittest.TestCase):
    @cached_property
    def teacher_config(self):
        return AutoConfig.from_pretrained(TINY_BART)

    def test_valid_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.num_hidden_layers, 1)

    def test_asymmetric_t5(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_T5, tempfile.mkdtemp(), e=1, d=None)

    def test_same_decoder_small_encoder(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=None)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, self.teacher_config.encoder_layers)

    def test_small_enc_small_dec(self):
        student, *_ = create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=1, d=1)
        self.assertEqual(student.config.encoder_layers, 1)
        self.assertEqual(student.config.decoder_layers, 1)

    def test_raises_assert(self):
        with self.assertRaises(AssertionError):
            create_student_by_copying_alternating_layers(TINY_BART, tempfile.mkdtemp(), e=None, d=None)
35
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPImageProcessor, CLIPProcessor @require_vision class UpperCAmelCase ( unittest.TestCase ): def UpperCAmelCase_ ( self :Optional[Any] )-> Tuple: A__ = tempfile.mkdtemp() # fmt: off A__ = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on A__ = dict(zip(lowercase_ , range(len(lowercase_ ) ) ) ) A__ = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] A__ = {"unk_token": "<unk>"} A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(lowercase_ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(lowercase_ ) ) A__ = { "do_resize": True, "size": 20, "do_center_crop": True, "crop_size": 18, "do_normalize": True, "image_mean": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], "image_std": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } A__ = os.path.join(self.tmpdirname , lowercase_ ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(lowercase_ , lowercase_ ) def UpperCAmelCase_ ( self :Any , **lowercase_ :Union[str, Any] )-> Tuple: return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowercase_ ) def UpperCAmelCase_ ( self :Any , **lowercase_ :Tuple )-> Dict: return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowercase_ ) def UpperCAmelCase_ ( self :Dict , **lowercase_ :Union[str, Any] )-> Any: return CLIPImageProcessor.from_pretrained(self.tmpdirname , **lowercase_ ) def UpperCAmelCase_ ( self :List[str] )-> int: shutil.rmtree(self.tmpdirname ) def UpperCAmelCase_ ( self :Optional[int] )-> Optional[int]: A__ = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )] A__ = [Image.fromarray(np.moveaxis(lowercase_ , 0 , -1 ) ) for x in image_inputs] return image_inputs def UpperCAmelCase_ ( self :int )-> List[Any]: A__ = self.get_tokenizer() A__ = self.get_rust_tokenizer() A__ = self.get_image_processor() A__ = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) processor_slow.save_pretrained(self.tmpdirname ) A__ = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=lowercase_ ) A__ = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) processor_fast.save_pretrained(self.tmpdirname ) A__ = CLIPProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , lowercase_ ) self.assertIsInstance(processor_fast.tokenizer , lowercase_ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , 
image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , lowercase_ ) self.assertIsInstance(processor_fast.image_processor , lowercase_ ) def UpperCAmelCase_ ( self :Optional[Any] )-> Optional[int]: A__ = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) A__ = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) A__ = self.get_image_processor(do_normalize=lowercase_ , padding_value=1.0 ) A__ = CLIPProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=lowercase_ , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , lowercase_ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , lowercase_ ) def UpperCAmelCase_ ( self :List[Any] )-> Tuple: A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) A__ = self.prepare_image_inputs() A__ = image_processor(lowercase_ , return_tensors="np" ) A__ = processor(images=lowercase_ , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 ) def UpperCAmelCase_ ( self :Optional[int] )-> Dict: A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) A__ = "lower newer" A__ = processor(text=lowercase_ ) A__ = tokenizer(lowercase_ ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def UpperCAmelCase_ ( self :str )-> Any: A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) A__ = "lower newer" A__ = self.prepare_image_inputs() A__ = processor(text=lowercase_ , images=lowercase_ ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(lowercase_ ): processor() def UpperCAmelCase_ ( self :Tuple )-> Tuple: A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] A__ = processor.batch_decode(lowercase_ ) A__ = tokenizer.batch_decode(lowercase_ ) self.assertListEqual(lowercase_ , lowercase_ ) def UpperCAmelCase_ ( self :List[Any] )-> Dict: A__ = self.get_image_processor() A__ = self.get_tokenizer() A__ = CLIPProcessor(tokenizer=lowercase_ , image_processor=lowercase_ ) A__ = "lower newer" A__ = self.prepare_image_inputs() A__ = processor(text=lowercase_ , images=lowercase_ ) self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
237
0
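create_student_by_copying_alternating_layers, exercised by the test sample above, initializes a shallow student from evenly spaced teacher layers. A sketch of the index-selection idea (the real helper uses a precomputed layer map; the function name here is illustrative):

def pick_alternating_layers(n_teacher: int, n_student: int) -> list:
    # evenly spaced teacher layer indices to copy into the student
    step = n_teacher / n_student
    return [round(i * step) for i in range(n_student)]

print(pick_alternating_layers(12, 6))  # [0, 2, 4, 6, 8, 10]
print(pick_alternating_layers(12, 3))  # [0, 4, 8]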
'''simple docstring'''


def snake_to_camel_case(input_str: str, use_pascal: bool = False) -> str:
    if not isinstance(input_str, str):
        msg = f"Expected string as input, found {type(input_str)}"
        raise ValueError(msg)
    if not isinstance(use_pascal, bool):
        msg = f"Expected boolean as use_pascal parameter, found {type(use_pascal)}"
        raise ValueError(msg)

    words = input_str.split("_")

    # in camelCase the first word stays lowercase; in PascalCase it is capitalized too
    start_index = 0 if use_pascal else 1

    words_to_capitalize = words[start_index:]

    capitalized_words = [word[0].upper() + word[1:] for word in words_to_capitalize]

    initial_word = "" if use_pascal else words[0]

    return "".join([initial_word, *capitalized_words])


if __name__ == "__main__":
    from doctest import testmod

    testmod()
92
'''simple docstring''' import os import torch from ..logging import get_logger from .constants import FSDP_PYTORCH_VERSION, MODEL_NAME, OPTIMIZER_NAME from .versions import is_torch_version if is_torch_version(">=", FSDP_PYTORCH_VERSION): import torch.distributed.checkpoint as dist_cp from torch.distributed.checkpoint.default_planner import DefaultLoadPlanner, DefaultSavePlanner from torch.distributed.checkpoint.optimizer import load_sharded_optimizer_state_dict from torch.distributed.fsdp.fully_sharded_data_parallel import FullyShardedDataParallel as FSDP from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType _SCREAMING_SNAKE_CASE : Tuple = get_logger(__name__) def UpperCamelCase_( snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : Optional[Any] , snake_case : int , snake_case : List[Any]=0 ): '''simple docstring''' os.makedirs(snake_case , exist_ok=snake_case ) with FSDP.state_dict_type( snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): snake_case_ = model.state_dict() if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: snake_case_ = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin' snake_case_ = os.path.join(snake_case , snake_case ) if accelerator.process_index == 0: logger.info(f'Saving model to {output_model_file}' ) torch.save(snake_case , snake_case ) logger.info(f'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: snake_case_ = ( f'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) snake_case_ = os.path.join(snake_case , snake_case ) logger.info(f'Saving model to {output_model_file}' ) torch.save(snake_case , snake_case ) logger.info(f'Model saved to {output_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: snake_case_ = os.path.join(snake_case , f'{MODEL_NAME}_{model_index}' ) os.makedirs(snake_case , exist_ok=snake_case ) logger.info(f'Saving model to {ckpt_dir}' ) snake_case_ = {"model": state_dict} dist_cp.save_state_dict( state_dict=snake_case , storage_writer=dist_cp.FileSystemWriter(snake_case ) , planner=DefaultSavePlanner() , ) logger.info(f'Model saved to {ckpt_dir}' ) def UpperCamelCase_( snake_case : Optional[int] , snake_case : Optional[Any] , snake_case : int , snake_case : Union[str, Any] , snake_case : Any=0 ): '''simple docstring''' accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if type(snake_case ) != FSDP and accelerator.process_index != 0: if not fsdp_plugin.sync_module_states: raise ValueError( "Set the `sync_module_states` flag to `True` so that model states are synced across processes when " "initializing FSDP object" ) return snake_case_ = f'{MODEL_NAME}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}.bin' snake_case_ = os.path.join(snake_case , snake_case ) logger.info(f'Loading model from {input_model_file}' ) snake_case_ = torch.load(snake_case ) logger.info(f'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.LOCAL_STATE_DICT: snake_case_ = ( f'{MODEL_NAME}_rank{accelerator.process_index}.bin' if model_index == 0 else f'{MODEL_NAME}_{model_index}_rank{accelerator.process_index}.bin' ) 
snake_case_ = os.path.join(snake_case , snake_case ) logger.info(f'Loading model from {input_model_file}' ) snake_case_ = torch.load(snake_case ) logger.info(f'Model loaded from {input_model_file}' ) elif fsdp_plugin.state_dict_type == StateDictType.SHARDED_STATE_DICT: snake_case_ = ( os.path.join(snake_case , f'{MODEL_NAME}_{model_index}' ) if f'{MODEL_NAME}' not in input_dir else input_dir ) logger.info(f'Loading model from {ckpt_dir}' ) snake_case_ = {"model": model.state_dict()} dist_cp.load_state_dict( state_dict=snake_case , storage_reader=dist_cp.FileSystemReader(snake_case ) , planner=DefaultLoadPlanner() , ) snake_case_ = state_dict["model"] logger.info(f'Model loaded from {ckpt_dir}' ) model.load_state_dict(snake_case ) def UpperCamelCase_( snake_case : str , snake_case : List[str] , snake_case : Any , snake_case : Tuple , snake_case : Optional[Any] , snake_case : Tuple=0 ): '''simple docstring''' os.makedirs(snake_case , exist_ok=snake_case ) with FSDP.state_dict_type( snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): snake_case_ = FSDP.optim_state_dict(snake_case , snake_case ) if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: if accelerator.process_index == 0: snake_case_ = ( f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) snake_case_ = os.path.join(snake_case , snake_case ) logger.info(f'Saving Optimizer state to {output_optimizer_file}' ) torch.save(snake_case , snake_case ) logger.info(f'Optimizer state saved in {output_optimizer_file}' ) else: snake_case_ = os.path.join(snake_case , f'{OPTIMIZER_NAME}_{optimizer_index}' ) os.makedirs(snake_case , exist_ok=snake_case ) logger.info(f'Saving Optimizer state to {ckpt_dir}' ) dist_cp.save_state_dict( state_dict={"optimizer": optim_state} , storage_writer=dist_cp.FileSystemWriter(snake_case ) , planner=DefaultSavePlanner() , ) logger.info(f'Optimizer state saved in {ckpt_dir}' ) def UpperCamelCase_( snake_case : Optional[Any] , snake_case : List[str] , snake_case : Union[str, Any] , snake_case : int , snake_case : Optional[int] , snake_case : Union[str, Any]=0 ): '''simple docstring''' accelerator.wait_for_everyone() with FSDP.state_dict_type( snake_case , fsdp_plugin.state_dict_type , fsdp_plugin.state_dict_config , fsdp_plugin.optim_state_dict_config ): if fsdp_plugin.state_dict_type == StateDictType.FULL_STATE_DICT: snake_case_ = None # below check should work but currently it isn't working (mostly opytorch issue), # in the meantime disabling it at the cost of excess memory usage # if accelerator.process_index == 0 or not fsdp_plugin.optim_state_dict_config.rank0_only: snake_case_ = ( f'{OPTIMIZER_NAME}.bin' if optimizer_index == 0 else f'{OPTIMIZER_NAME}_{optimizer_index}.bin' ) snake_case_ = os.path.join(snake_case , snake_case ) logger.info(f'Loading Optimizer state from {input_optimizer_file}' ) snake_case_ = torch.load(snake_case ) logger.info(f'Optimizer state loaded from {input_optimizer_file}' ) else: snake_case_ = ( os.path.join(snake_case , f'{OPTIMIZER_NAME}_{optimizer_index}' ) if f'{OPTIMIZER_NAME}' not in input_dir else input_dir ) logger.info(f'Loading Optimizer from {ckpt_dir}' ) snake_case_ = load_sharded_optimizer_state_dict( model_state_dict=model.state_dict() , optimizer_key="optimizer" , storage_reader=dist_cp.FileSystemReader(snake_case ) , ) snake_case_ = optim_state["optimizer"] logger.info(f'Optimizer loaded from {ckpt_dir}' ) snake_case_ = 
FSDP.optim_state_dict_to_load(snake_case , snake_case , snake_case ) optimizer.load_state_dict(snake_case )
92
1
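Usage for the snake_to_camel_case converter above (outputs checked by hand):

print(snake_to_camel_case("some_random_string"))                   # someRandomString
print(snake_to_camel_case("some_random_string", use_pascal=True))  # SomeRandomString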
import re import time from typing import Optional import IPython.display as disp from ..trainer_callback import TrainerCallback from ..trainer_utils import IntervalStrategy, has_length def UpperCamelCase( __UpperCamelCase : Optional[int] ): lowerCAmelCase_ : Union[str, Any] = int(__UpperCamelCase ) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ : Optional[Any] = t // 3600, (t // 60) % 60, t % 60 return f"""{h}:{m:02d}:{s:02d}""" if h != 0 else f"""{m:02d}:{s:02d}""" def UpperCamelCase( __UpperCamelCase : Dict ,__UpperCamelCase : Tuple ,__UpperCamelCase : List[str] ,__UpperCamelCase : Optional[int] ,__UpperCamelCase : int=300 ): # docstyle-ignore return f""" <div> {prefix} <progress value='{value}' max='{total}' style='width:{width}px; height:20px; vertical-align: middle;'></progress> {label} </div> """ def UpperCamelCase( __UpperCamelCase : int ): lowerCAmelCase_ : str = '''<table border="1" class="dataframe">\n''' html_code += """ <thead>\n <tr style="text-align: left;">\n""" for i in items[0]: html_code += f""" <th>{i}</th>\n""" html_code += " </tr>\n </thead>\n <tbody>\n" for line in items[1:]: html_code += " <tr>\n" for elt in line: lowerCAmelCase_ : Optional[Any] = f"""{elt:.6f}""" if isinstance(__UpperCamelCase ,__UpperCamelCase ) else str(__UpperCamelCase ) html_code += f""" <td>{elt}</td>\n""" html_code += " </tr>\n" html_code += " </tbody>\n</table><p>" return html_code class __snake_case : _a = 5 _a = 0.2 def __init__( self : Optional[Any] , A_ : int , A_ : Optional[str] = None , A_ : bool = True , A_ : Optional["NotebookTrainingTracker"] = None , A_ : int = 3_0_0 , ): lowerCAmelCase_ : Union[str, Any] = total lowerCAmelCase_ : int = '''''' if prefix is None else prefix lowerCAmelCase_ : Optional[Any] = leave lowerCAmelCase_ : Union[str, Any] = parent lowerCAmelCase_ : Any = width lowerCAmelCase_ : Optional[Any] = None lowerCAmelCase_ : Tuple = None lowerCAmelCase_ : str = None def UpperCAmelCase__ ( self : Union[str, Any] , A_ : int , A_ : bool = False , A_ : str = None): lowerCAmelCase_ : Dict = value if comment is not None: lowerCAmelCase_ : List[str] = comment if self.last_value is None: lowerCAmelCase_ : List[str] = time.time() lowerCAmelCase_ : Dict = value lowerCAmelCase_ : Union[str, Any] = None lowerCAmelCase_ : str = self.warmup lowerCAmelCase_ : List[Any] = 1 self.update_bar(A_) elif value <= self.last_value and not force_update: return elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total): if self.first_calls > 0: self.first_calls -= 1 lowerCAmelCase_ : Tuple = time.time() lowerCAmelCase_ : List[str] = current_time - self.start_time # We could have value = self.start_value if the update is called twixe with the same start value. 
if value > self.start_value: lowerCAmelCase_ : int = self.elapsed_time / (value - self.start_value) else: lowerCAmelCase_ : str = None if value >= self.total: lowerCAmelCase_ : int = self.total lowerCAmelCase_ : List[str] = None if not self.leave: self.close() elif self.average_time_per_item is not None: lowerCAmelCase_ : List[str] = self.average_time_per_item * (self.total - value) self.update_bar(A_) lowerCAmelCase_ : Tuple = value lowerCAmelCase_ : Optional[int] = current_time if self.average_time_per_item is None: lowerCAmelCase_ : List[Any] = 1 else: lowerCAmelCase_ : Any = max(int(self.update_every / self.average_time_per_item) , 1) def UpperCAmelCase__ ( self : int , A_ : Optional[int] , A_ : List[str]=None): lowerCAmelCase_ : str = ''' ''' * (len(str(self.total)) - len(str(A_))) + str(A_) if self.elapsed_time is None: lowerCAmelCase_ : str = F"""[{spaced_value}/{self.total} : < :""" elif self.predicted_remaining is None: lowerCAmelCase_ : Tuple = F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time)}""" else: lowerCAmelCase_ : Any = ( F"""[{spaced_value}/{self.total} {format_time(self.elapsed_time)} <""" F""" {format_time(self.predicted_remaining)}""" ) self.label += F""", {1/self.average_time_per_item:.2f} it/s""" self.label += "]" if self.comment is None or len(self.comment) == 0 else F""", {self.comment}]""" self.display() def UpperCAmelCase__ ( self : Tuple): lowerCAmelCase_ : Union[str, Any] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width) if self.parent is not None: # If this is a child bar, the parent will take care of the display. self.parent.display() return if self.output is None: lowerCAmelCase_ : int = disp.display(disp.HTML(self.html_code) , display_id=A_) else: self.output.update(disp.HTML(self.html_code)) def UpperCAmelCase__ ( self : Optional[int]): if self.parent is None and self.output is not None: self.output.update(disp.HTML('''''')) class __snake_case ( UpperCamelCase_ ): def __init__( self : Union[str, Any] , A_ : Optional[int] , A_ : Dict=None): super().__init__(A_) lowerCAmelCase_ : int = None if column_names is None else [column_names] lowerCAmelCase_ : Optional[int] = None def UpperCAmelCase__ ( self : List[str]): lowerCAmelCase_ : Optional[Any] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width) if self.inner_table is not None: self.html_code += text_to_html_table(self.inner_table) if self.child_bar is not None: self.html_code += self.child_bar.html_code if self.output is None: lowerCAmelCase_ : Optional[Any] = disp.display(disp.HTML(self.html_code) , display_id=A_) else: self.output.update(disp.HTML(self.html_code)) def UpperCAmelCase__ ( self : List[str] , A_ : Optional[int]): if self.inner_table is None: lowerCAmelCase_ : List[Any] = [list(values.keys()), list(values.values())] else: lowerCAmelCase_ : List[Any] = self.inner_table[0] if len(self.inner_table) == 1: # We give a chance to update the column names at the first iteration for key in values.keys(): if key not in columns: columns.append(A_) lowerCAmelCase_ : str = columns self.inner_table.append([values[c] for c in columns]) def UpperCAmelCase__ ( self : Optional[Any] , A_ : Optional[Any] , A_ : Any=None , A_ : Optional[int]=3_0_0): lowerCAmelCase_ : Any = NotebookProgressBar(A_ , prefix=A_ , parent=self , width=A_) return self.child_bar def UpperCAmelCase__ ( self : List[str]): lowerCAmelCase_ : Dict = None self.display() class __snake_case ( UpperCamelCase_ ): def __init__( self : Dict): lowerCAmelCase_ : str = 
None lowerCAmelCase_ : List[str] = None lowerCAmelCase_ : Tuple = False def UpperCAmelCase__ ( self : int , A_ : Optional[Any] , A_ : List[Any] , A_ : Union[str, Any] , **A_ : str): lowerCAmelCase_ : str = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step''' lowerCAmelCase_ : Optional[int] = 0 lowerCAmelCase_ : Any = 0 lowerCAmelCase_ : List[str] = [self.first_column] + ['''Training Loss'''] if args.evaluation_strategy != IntervalStrategy.NO: column_names.append('''Validation Loss''') lowerCAmelCase_ : str = NotebookTrainingTracker(state.max_steps , A_) def UpperCAmelCase__ ( self : Optional[int] , A_ : Optional[Any] , A_ : Optional[int] , A_ : List[str] , **A_ : List[Any]): lowerCAmelCase_ : int = int(state.epoch) if int(state.epoch) == state.epoch else F"""{state.epoch:.2f}""" self.training_tracker.update( state.global_step + 1 , comment=F"""Epoch {epoch}/{state.num_train_epochs}""" , force_update=self._force_next_update , ) lowerCAmelCase_ : Dict = False def UpperCAmelCase__ ( self : Optional[Any] , A_ : List[str] , A_ : Optional[Any] , A_ : int , A_ : List[str]=None , **A_ : List[str]): if not has_length(A_): return if self.prediction_bar is None: if self.training_tracker is not None: lowerCAmelCase_ : int = self.training_tracker.add_child(len(A_)) else: lowerCAmelCase_ : List[str] = NotebookProgressBar(len(A_)) self.prediction_bar.update(1) else: self.prediction_bar.update(self.prediction_bar.value + 1) def UpperCAmelCase__ ( self : List[Any] , A_ : Optional[Any] , A_ : List[str] , A_ : Dict , **A_ : Tuple): if self.prediction_bar is not None: self.prediction_bar.close() lowerCAmelCase_ : List[Any] = None def UpperCAmelCase__ ( self : List[Any] , A_ : Optional[Any] , A_ : str , A_ : Any , A_ : Tuple=None , **A_ : str): # Only for when there is no evaluation if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs: lowerCAmelCase_ : List[str] = {'''Training Loss''': logs['''loss''']} # First column is necessarily Step sine we're not in epoch eval strategy lowerCAmelCase_ : int = state.global_step self.training_tracker.write_line(A_) def UpperCAmelCase__ ( self : int , A_ : Union[str, Any] , A_ : Tuple , A_ : Tuple , A_ : Optional[Any]=None , **A_ : Optional[Any]): if self.training_tracker is not None: lowerCAmelCase_ : Any = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''} for log in reversed(state.log_history): if "loss" in log: lowerCAmelCase_ : Dict = log['''loss'''] break if self.first_column == "Epoch": lowerCAmelCase_ : str = int(state.epoch) else: lowerCAmelCase_ : Optional[int] = state.global_step lowerCAmelCase_ : Optional[Any] = '''eval''' for k in metrics: if k.endswith('''_loss'''): lowerCAmelCase_ : str = re.sub(r'''\_loss$''' , '''''' , A_) lowerCAmelCase_ : Any = metrics.pop('''total_flos''' , A_) lowerCAmelCase_ : List[Any] = metrics.pop('''epoch''' , A_) lowerCAmelCase_ : Optional[Any] = metrics.pop(F"""{metric_key_prefix}_runtime""" , A_) lowerCAmelCase_ : List[Any] = metrics.pop(F"""{metric_key_prefix}_samples_per_second""" , A_) lowerCAmelCase_ : List[Any] = metrics.pop(F"""{metric_key_prefix}_steps_per_second""" , A_) lowerCAmelCase_ : Tuple = metrics.pop(F"""{metric_key_prefix}_jit_compilation_time""" , A_) for k, v in metrics.items(): if k == F"""{metric_key_prefix}_loss""": lowerCAmelCase_ : Optional[int] = v else: lowerCAmelCase_ : str = k.split('''_''') lowerCAmelCase_ : List[str] = ''' '''.join([part.capitalize() for part in splits[1:]]) lowerCAmelCase_ : List[Any] = v 
self.training_tracker.write_line(A_) self.training_tracker.remove_child() lowerCAmelCase_ : Any = None # Evaluation takes a long time so we should force the next update. lowerCAmelCase_ : str = True def UpperCAmelCase__ ( self : Union[str, Any] , A_ : Optional[int] , A_ : Union[str, Any] , A_ : Any , **A_ : int): self.training_tracker.update( state.global_step , comment=F"""Epoch {int(state.epoch)}/{state.num_train_epochs}""" , force_update=A_) lowerCAmelCase_ : str = None
103
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
103
1
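The notebook-callback sample above opens with a seconds-to-clock formatter; because the sample's obfuscation reuses one name for several helpers, here is a de-obfuscated, runnable sketch of just that first helper (the name format_time is an assumption):

def format_time(t: float) -> str:
    # format seconds as h:mm:ss, dropping the hour when it is zero
    t = int(t)
    h, m, s = t // 3600, (t // 60) % 60, t % 60
    return f"{h}:{m:02d}:{s:02d}" if h != 0 else f"{m:02d}:{s:02d}"

print(format_time(3723))  # 1:02:03
print(format_time(59))    # 00:59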
"""simple docstring""" import re from flax.core.frozen_dict import freeze from flax.traverse_util import flatten_dict, unflatten_dict from jax.experimental import PartitionSpec as P # Sentinels SCREAMING_SNAKE_CASE = object() # For specifying empty leaf dict `{}` SCREAMING_SNAKE_CASE = object() def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> List[Any]: A__ = tuple((re.compile(x + "$" ) for x in qs) ) for i in range(len(lowercase_ ) - len(lowercase_ ) + 1 ): A__ = [x.match(lowercase_ ) for x, y in zip(lowercase_ , ks[i:] )] if matches and all(lowercase_ ): return True return False def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> Optional[int]: def replace(lowercase_ , lowercase_ ): for rule, replacement in rules: if _match(lowercase_ , lowercase_ ): return replacement return val return replace def _SCREAMING_SNAKE_CASE ( ) -> List[str]: return [ # embeddings (("transformer", "wpe", "embedding"), P("mp" , lowercase_ )), (("transformer", "wte", "embedding"), P("mp" , lowercase_ )), # atention (("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(lowercase_ , "mp" )), (("attention", "out_proj", "kernel"), P("mp" , lowercase_ )), (("attention", "out_proj", "bias"), None), # mlp (("mlp", "c_fc", "kernel"), P(lowercase_ , "mp" )), (("mlp", "c_fc", "bias"), P("mp" )), (("mlp", "c_proj", "kernel"), P("mp" , lowercase_ )), (("mlp", "c_proj", "bias"), None), # layer norms ((r"ln_\d+", "bias"), None), ((r"\d+", r"ln_\d+", "scale"), None), (("ln_f", "bias"), None), (("ln_f", "scale"), None), ] def _SCREAMING_SNAKE_CASE ( lowercase_ ) -> int: A__ = _get_partition_rules() A__ = _replacement_rules(lowercase_ ) A__ = {k: _unmatched for k in flatten_dict(lowercase_ )} A__ = {k: replace(lowercase_ , lowercase_ ) for k, v in initd.items()} assert _unmatched not in result.values(), "Incomplete partition spec." return freeze(unflatten_dict(lowercase_ ) )
230
"""simple docstring""" import random from .binary_exp_mod import bin_exp_mod def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_=10_00 ) -> Optional[Any]: if n < 2: return False if n % 2 == 0: return n == 2 # this means n is odd A__ = n - 1 A__ = 0 while d % 2 == 0: d /= 2 exp += 1 # n - 1=d*(2**exp) A__ = 0 while count < prec: A__ = random.randint(2 , n - 1 ) A__ = bin_exp_mod(lowercase_ , lowercase_ , lowercase_ ) if b != 1: A__ = True for _ in range(lowercase_ ): if b == n - 1: A__ = False break A__ = b * b b %= n if flag: return False count += 1 return True if __name__ == "__main__": SCREAMING_SNAKE_CASE = abs(int(input("Enter bound : ").strip())) print("Here's the list of primes:") print(", ".join(str(i) for i in range(n + 1) if is_prime_big(i)))
230
1
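is_prime_big above leans on bin_exp_mod, which is imported but not shown. A standard square-and-multiply sketch of what that helper presumably computes (the (base, exponent, modulus) signature is an assumption inferred from the call site):

def bin_exp_mod(base: int, exponent: int, modulus: int) -> int:
    # assumed behavior of the imported helper: base**exponent % modulus
    result = 1
    base %= modulus
    exponent = int(exponent)  # is_prime_big passes d as a float after d /= 2
    while exponent > 0:
        if exponent & 1:
            result = result * base % modulus
        base = base * base % modulus
        exponent >>= 1
    return result

assert bin_exp_mod(3, 13, 7) == pow(3, 13, 7)  # sanity check against the builtin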
def join(separator: str, separated: list) -> str:
    joined = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator

    # strip the trailing separator added by the last iteration
    return joined.strip(separator)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
339
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
    from ..tf_utils import stable_softmax

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
        )

    def _sanitize_parameters(self, top_k=None):
        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return {}, {}, postprocess_params

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image):
        image = load_image(image)
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        elif self.framework == "tf":
            probs = stable_softmax(model_outputs.logits, axis=-1)[0]
            topk = tf.math.top_k(probs, k=top_k)
            scores, ids = topk.values.numpy(), topk.indices.numpy()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(scores, ids)]
20
0
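Usage for the join function above (outputs checked by hand):

print(join(" ", ["You", "are", "amazing!"]))  # You are amazing!
print(join("-", ["a", "b", "c"]))             # a-b-c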
'''simple docstring'''
import math
import random


def sigmoid_function(value: float, deriv: bool = False) -> float:
    if deriv:
        return value * (1 - value)
    return 1 / (1 + math.exp(-value))


# Initial Value
INITIAL_VALUE = 0.02


def forward_propagation(expected: int, number_propagations: int) -> float:
    # Random weight
    weight = float(2 * (random.randint(1, 100)) - 1)

    for _ in range(number_propagations):
        # Forward propagation
        layer_1 = sigmoid_function(INITIAL_VALUE * weight)
        # How much did we miss?
        layer_1_error = (expected / 100) - layer_1
        # Error delta
        layer_1_delta = layer_1_error * sigmoid_function(layer_1, True)
        # Update weight
        weight += INITIAL_VALUE * layer_1_delta

    return layer_1 * 100


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    expected = int(input("Expected value: "))
    number_propagations = int(input("Number of propagations: "))
    print(forward_propagation(expected, number_propagations))
362
'''simple docstring''' from abc import ABC, abstractmethod from typing import List, Optional class a ( _a ): """simple docstring""" def __init__( self : Optional[Any] ) -> int: # test for the above condition self.test() def lowerCamelCase__ ( self : Dict ) -> List[str]: __UpperCAmelCase : Optional[int] = 0 __UpperCAmelCase : Any = False while not completed: if counter == 1: self.reset() __UpperCAmelCase : Optional[int] = self.advance() if not self.does_advance(snake_case ): raise Exception( '''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' ) __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = self.update(snake_case ) counter += 1 if counter > 1_0000: raise Exception('''update() does not fulfill the constraint.''' ) if self.remaining() != 0: raise Exception('''Custom Constraint is not defined correctly.''' ) @abstractmethod def lowerCamelCase__ ( self : Union[str, Any] ) -> List[str]: raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def lowerCamelCase__ ( self : Optional[int] , snake_case : int ) -> Optional[int]: raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def lowerCamelCase__ ( self : List[Any] , snake_case : int ) -> int: raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def lowerCamelCase__ ( self : int ) -> Optional[int]: raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def lowerCamelCase__ ( self : int ) -> Tuple: raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) @abstractmethod def lowerCamelCase__ ( self : Union[str, Any] , snake_case : List[Any]=False ) -> Any: raise NotImplementedError( f'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' ) class a ( _a ): """simple docstring""" def __init__( self : int , snake_case : List[int] ) -> Tuple: super(snake_case , self ).__init__() if not isinstance(snake_case , snake_case ) or len(snake_case ) == 0: raise ValueError(f'`token_ids` has to be a non-empty list, but is {token_ids}.' ) if any((not isinstance(snake_case , snake_case ) or token_id < 0) for token_id in token_ids ): raise ValueError(f'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.' 
) __UpperCAmelCase : Dict = token_ids __UpperCAmelCase : Tuple = len(self.token_ids ) __UpperCAmelCase : List[str] = -1 # the index of the currently fulfilled step __UpperCAmelCase : int = False def lowerCamelCase__ ( self : List[str] ) -> str: if self.completed: return None return self.token_ids[self.fulfilled_idx + 1] def lowerCamelCase__ ( self : Any , snake_case : int ) -> Optional[int]: if not isinstance(snake_case , snake_case ): raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(snake_case )}' ) if self.completed: return False return token_id == self.token_ids[self.fulfilled_idx + 1] def lowerCamelCase__ ( self : Union[str, Any] , snake_case : int ) -> Optional[int]: if not isinstance(snake_case , snake_case ): raise ValueError(f'`token_id` has to be an `int`, but is {token_id} of type {type(snake_case )}' ) __UpperCAmelCase : Any = False __UpperCAmelCase : Tuple = False __UpperCAmelCase : Tuple = False if self.does_advance(snake_case ): self.fulfilled_idx += 1 __UpperCAmelCase : Union[str, Any] = True if self.fulfilled_idx == (self.seqlen - 1): __UpperCAmelCase : List[Any] = True __UpperCAmelCase : Union[str, Any] = completed else: # failed to make progress. __UpperCAmelCase : List[str] = True self.reset() return stepped, completed, reset def lowerCamelCase__ ( self : int ) -> List[Any]: __UpperCAmelCase : Union[str, Any] = False __UpperCAmelCase : Union[str, Any] = 0 def lowerCamelCase__ ( self : str ) -> Optional[int]: return self.seqlen - (self.fulfilled_idx + 1) def lowerCamelCase__ ( self : int , snake_case : Dict=False ) -> List[str]: __UpperCAmelCase : List[str] = PhrasalConstraint(self.token_ids ) if stateful: __UpperCAmelCase : int = self.seqlen __UpperCAmelCase : Optional[Any] = self.fulfilled_idx __UpperCAmelCase : List[Any] = self.completed return new_constraint class a : """simple docstring""" def __init__( self : List[str] , snake_case : List[List[int]] , snake_case : Dict=True ) -> Any: __UpperCAmelCase : List[Any] = max([len(snake_case ) for one in nested_token_ids] ) __UpperCAmelCase : Union[str, Any] = {} for token_ids in nested_token_ids: __UpperCAmelCase : List[str] = root for tidx, token_id in enumerate(snake_case ): if token_id not in level: __UpperCAmelCase : List[Any] = {} __UpperCAmelCase : Tuple = level[token_id] if no_subsets and self.has_subsets(snake_case , snake_case ): raise ValueError( '''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is''' f' {nested_token_ids}.' 
) __UpperCAmelCase : Tuple = root def lowerCamelCase__ ( self : Dict , snake_case : List[str] ) -> List[Any]: __UpperCAmelCase : Dict = self.trie for current_token in current_seq: __UpperCAmelCase : List[str] = start[current_token] __UpperCAmelCase : str = list(start.keys() ) return next_tokens def lowerCamelCase__ ( self : Optional[Any] , snake_case : List[str] ) -> Any: __UpperCAmelCase : Optional[Any] = self.next_tokens(snake_case ) return len(snake_case ) == 0 def lowerCamelCase__ ( self : Union[str, Any] , snake_case : int ) -> Optional[int]: __UpperCAmelCase : str = list(root.values() ) if len(snake_case ) == 0: return 1 else: return sum([self.count_leaves(snake_case ) for nn in next_nodes] ) def lowerCamelCase__ ( self : Optional[int] , snake_case : int , snake_case : Dict ) -> str: __UpperCAmelCase : Dict = self.count_leaves(snake_case ) return len(snake_case ) != leaf_count class a ( _a ): """simple docstring""" def __init__( self : Union[str, Any] , snake_case : List[List[int]] ) -> str: super(snake_case , self ).__init__() if not isinstance(snake_case , snake_case ) or len(snake_case ) == 0: raise ValueError(f'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.' ) if any(not isinstance(snake_case , snake_case ) for token_ids in nested_token_ids ): raise ValueError(f'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.' ) if any( any((not isinstance(snake_case , snake_case ) or token_id < 0) for token_id in token_ids ) for token_ids in nested_token_ids ): raise ValueError( f'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.' ) __UpperCAmelCase : Optional[int] = DisjunctiveTrie(snake_case ) __UpperCAmelCase : Tuple = nested_token_ids __UpperCAmelCase : List[Any] = self.trie.max_height __UpperCAmelCase : List[Any] = [] __UpperCAmelCase : Union[str, Any] = False def lowerCamelCase__ ( self : int ) -> List[Any]: __UpperCAmelCase : Optional[int] = self.trie.next_tokens(self.current_seq ) if len(snake_case ) == 0: return None else: return token_list def lowerCamelCase__ ( self : Tuple , snake_case : int ) -> Dict: if not isinstance(snake_case , snake_case ): raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(snake_case )}' ) __UpperCAmelCase : List[str] = self.trie.next_tokens(self.current_seq ) return token_id in next_tokens def lowerCamelCase__ ( self : Any , snake_case : int ) -> Tuple: if not isinstance(snake_case , snake_case ): raise ValueError(f'`token_id` is supposed to be type `int`, but is {token_id} of type {type(snake_case )}' ) __UpperCAmelCase : Dict = False __UpperCAmelCase : Optional[Any] = False __UpperCAmelCase : str = False if self.does_advance(snake_case ): self.current_seq.append(snake_case ) __UpperCAmelCase : int = True else: __UpperCAmelCase : Optional[Any] = True self.reset() __UpperCAmelCase : Optional[Any] = self.trie.reached_leaf(self.current_seq ) __UpperCAmelCase : Tuple = completed return stepped, completed, reset def lowerCamelCase__ ( self : Dict ) -> Optional[Any]: __UpperCAmelCase : str = False __UpperCAmelCase : Tuple = [] def lowerCamelCase__ ( self : Tuple ) -> Any: if self.completed: # since this can be completed without reaching max height return 0 else: return self.seqlen - len(self.current_seq ) def lowerCamelCase__ ( self : Any , snake_case : Optional[int]=False ) -> Tuple: __UpperCAmelCase : str = DisjunctiveConstraint(self.token_ids ) if stateful: __UpperCAmelCase : Tuple = self.seqlen __UpperCAmelCase : Dict = 
self.current_seq __UpperCAmelCase : str = self.completed return new_constraint class a : """simple docstring""" def __init__( self : Union[str, Any] , snake_case : List[Constraint] ) -> Union[str, Any]: __UpperCAmelCase : Optional[Any] = constraints # max # of steps required to fulfill a given constraint __UpperCAmelCase : int = max([c.seqlen for c in constraints] ) __UpperCAmelCase : int = len(snake_case ) __UpperCAmelCase : Optional[int] = False self.init_state() def lowerCamelCase__ ( self : List[str] ) -> List[str]: __UpperCAmelCase : List[Any] = [] __UpperCAmelCase : List[str] = None __UpperCAmelCase : str = [constraint.copy(stateful=snake_case ) for constraint in self.constraints] def lowerCamelCase__ ( self : Optional[int] ) -> Optional[Any]: __UpperCAmelCase : Dict = 0 if self.inprogress_constraint: # extra points for having a constraint mid-fulfilled add += self.max_seqlen - self.inprogress_constraint.remaining() return (len(self.complete_constraints ) * self.max_seqlen) + add def lowerCamelCase__ ( self : Tuple ) -> int: __UpperCAmelCase : int = [] if self.inprogress_constraint is None: for constraint in self.pending_constraints: # "pending" == "unfulfilled yet" __UpperCAmelCase : Optional[int] = constraint.advance() if isinstance(snake_case , snake_case ): token_list.append(snake_case ) elif isinstance(snake_case , snake_case ): token_list.extend(snake_case ) else: __UpperCAmelCase : Optional[Any] = self.inprogress_constraint.advance() if isinstance(snake_case , snake_case ): token_list.append(snake_case ) elif isinstance(snake_case , snake_case ): token_list.extend(snake_case ) if len(snake_case ) == 0: return None else: return token_list def lowerCamelCase__ ( self : List[str] , snake_case : Optional[List[int]] ) -> Optional[int]: self.init_state() if token_ids is not None: for token in token_ids: # completes or steps **one** constraint __UpperCAmelCase , __UpperCAmelCase : Dict = self.add(snake_case ) # the entire list of constraints are fulfilled if self.completed: break def lowerCamelCase__ ( self : List[str] , snake_case : int ) -> List[str]: if not isinstance(snake_case , snake_case ): raise ValueError(f'`token_id` should be an `int`, but is `{token_id}`.' ) __UpperCAmelCase , __UpperCAmelCase : str = False, False if self.completed: __UpperCAmelCase : Union[str, Any] = True __UpperCAmelCase : str = False return complete, stepped if self.inprogress_constraint is not None: # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current # job, simply update the state __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Any = self.inprogress_constraint.update(snake_case ) if reset: # 1. If the next token breaks the progress, then we must restart. # e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books". # But that doesn't mean we self.init_state(), since we only reset the state for this particular # constraint, not the full list of constraints. self.pending_constraints.append(self.inprogress_constraint.copy(stateful=snake_case ) ) __UpperCAmelCase : Optional[Any] = None if complete: # 2. If the next token completes the constraint, move it to completed list, set # inprogress to None. If there are no pending constraints either, then this full list of constraints # is complete. self.complete_constraints.append(self.inprogress_constraint ) __UpperCAmelCase : str = None if len(self.pending_constraints ) == 0: # we're done! 
__UpperCAmelCase : Optional[int] = True else: # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list # of constraints? for cidx, pending_constraint in enumerate(self.pending_constraints ): if pending_constraint.does_advance(snake_case ): __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = pending_constraint.update(snake_case ) if not stepped: raise Exception( '''`constraint.update(token_id)` is not yielding incremental progress, ''' '''even though `constraint.does_advance(token_id)` is true.''' ) if complete: self.complete_constraints.append(snake_case ) __UpperCAmelCase : Tuple = None if not complete and stepped: __UpperCAmelCase : List[Any] = pending_constraint if complete or stepped: # If we made any progress at all, then it's at least not a "pending constraint". __UpperCAmelCase : Optional[int] = ( self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :] ) if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None: # If there's no longer any pending after this and no inprogress either, then we must be # complete. __UpperCAmelCase : Any = True break # prevent accidentally stepping through multiple constraints with just one token. return complete, stepped def lowerCamelCase__ ( self : int , snake_case : Optional[int]=True ) -> Optional[int]: __UpperCAmelCase : Union[str, Any] = ConstraintListState(self.constraints ) # we actually never though self.constraints objects # throughout this process. So it's at initialization state. if stateful: __UpperCAmelCase : str = [ constraint.copy(stateful=snake_case ) for constraint in self.complete_constraints ] if self.inprogress_constraint is not None: __UpperCAmelCase : Union[str, Any] = self.inprogress_constraint.copy(stateful=snake_case ) __UpperCAmelCase : Tuple = [constraint.copy() for constraint in self.pending_constraints] return new_state
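For reference, a minimal sketch of how the constraint state machine above is exercised through the public `transformers` API that this sample mirrors (the token ids are arbitrary placeholders):

# Minimal sketch, assuming the upstream `transformers` export of PhrasalConstraint.
from transformers import PhrasalConstraint

constraint = PhrasalConstraint([5, 6, 7])  # placeholder token ids
for token_id in [5, 6, 7]:
    stepped, completed, reset = constraint.update(token_id)
    print(stepped, completed, reset)  # stepped is True each step; completed turns True on the last id
print(constraint.remaining())  # 0 once the whole phrase has been matched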
240
0
'''Surface area and volume of a regular dodecahedron.'''


def dodecahedron_surface_area(edge: float) -> float:
    '''Calculates the surface area of a regular dodecahedron with edge length `edge`.'''
    # check the type first so that non-numeric input raises ValueError, not TypeError
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be positive.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    '''Calculates the volume of a regular dodecahedron with edge length `edge`.'''
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be positive.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
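A quick numeric spot-check of the two closed forms above (rounding is for display only):

print(round(dodecahedron_surface_area(5), 4))  # 516.1432
print(round(dodecahedron_volume(5), 4))        # 957.8899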
22
'''simple docstring''' import inspect import unittest import numpy as np from tests.test_modeling_common import floats_tensor from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel if is_vision_available(): from transformers import MaskFormerImageProcessor if is_vision_available(): from PIL import Image class A_ : def __init__( self : str , snake_case_ : int , snake_case_ : Union[str, Any]=2 , snake_case_ : List[Any]=True , snake_case_ : str=False , snake_case_ : str=1_0 , snake_case_ : str=3 , snake_case_ : Dict=3_2 * 4 , snake_case_ : Any=3_2 * 6 , snake_case_ : Optional[Any]=4 , snake_case_ : Optional[int]=3_2 , ): _UpperCAmelCase = parent _UpperCAmelCase = batch_size _UpperCAmelCase = is_training _UpperCAmelCase = use_auxiliary_loss _UpperCAmelCase = num_queries _UpperCAmelCase = num_channels _UpperCAmelCase = min_size _UpperCAmelCase = max_size _UpperCAmelCase = num_labels _UpperCAmelCase = mask_feature_size def lowercase ( self : Union[str, Any] ): _UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to( snake_case_ ) _UpperCAmelCase = torch.ones([self.batch_size, self.min_size, self.max_size] , device=snake_case_ ) _UpperCAmelCase = ( torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=snake_case_ ) > 0.5 ).float() _UpperCAmelCase = (torch.rand((self.batch_size, self.num_labels) , device=snake_case_ ) > 0.5).long() _UpperCAmelCase = self.get_config() return config, pixel_values, pixel_mask, mask_labels, class_labels def lowercase ( self : List[Any] ): return MaskFormerConfig.from_backbone_and_decoder_configs( backbone_config=SwinConfig( depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig( decoder_ffn_dim=1_2_8 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , ) def lowercase ( self : Optional[Any] ): _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.prepare_config_and_inputs() _UpperCAmelCase = {"pixel_values": pixel_values, "pixel_mask": pixel_mask} return config, inputs_dict def lowercase ( self : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any] ): _UpperCAmelCase = output.encoder_hidden_states _UpperCAmelCase = output.pixel_decoder_hidden_states _UpperCAmelCase = output.transformer_decoder_hidden_states self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(snake_case_ ) , len(config.backbone_config.depths ) ) self.parent.assertTrue(len(snake_case_ ) , config.decoder_config.decoder_layers ) def lowercase ( self : Tuple , snake_case_ : str , snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Optional[Any]=False ): with torch.no_grad(): _UpperCAmelCase = MaskFormerModel(config=snake_case_ ) model.to(snake_case_ ) model.eval() _UpperCAmelCase = model(pixel_values=snake_case_ , pixel_mask=snake_case_ ) 
_UpperCAmelCase = model(snake_case_ , output_hidden_states=snake_case_ ) # the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the # encoder and pixel decoder self.parent.assertEqual( output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , ) # let's ensure the other two hidden state exists self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(output.encoder_last_hidden_state is not None ) if output_hidden_states: self.check_output_hidden_state(snake_case_ , snake_case_ ) def lowercase ( self : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : int , snake_case_ : str , snake_case_ : List[Any] ): _UpperCAmelCase = MaskFormerForInstanceSegmentation(config=snake_case_ ) model.to(snake_case_ ) model.eval() def comm_check_on_output(snake_case_ : int ): # let's still check that all the required stuff is there self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None ) self.parent.assertTrue(result.encoder_last_hidden_state is not None ) # okay, now we need to check the logits shape # due to the encoder compression, masks have a //4 spatial size self.parent.assertEqual( result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , ) # + 1 for null class self.parent.assertEqual( result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) ) with torch.no_grad(): _UpperCAmelCase = model(pixel_values=snake_case_ , pixel_mask=snake_case_ ) _UpperCAmelCase = model(snake_case_ ) comm_check_on_output(snake_case_ ) _UpperCAmelCase = model( pixel_values=snake_case_ , pixel_mask=snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ) comm_check_on_output(snake_case_ ) self.parent.assertTrue(result.loss is not None ) self.parent.assertEqual(result.loss.shape , torch.Size([1] ) ) @require_torch class A_ ( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ): _lowerCamelCase : Dict = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else () _lowerCamelCase : Tuple = ( {"""feature-extraction""": MaskFormerModel, """image-segmentation""": MaskFormerForInstanceSegmentation} if is_torch_available() else {} ) _lowerCamelCase : Optional[Any] = False _lowerCamelCase : Dict = False _lowerCamelCase : Any = False _lowerCamelCase : List[Any] = False def lowercase ( self : Optional[int] ): _UpperCAmelCase = MaskFormerModelTester(self ) _UpperCAmelCase = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ ) def lowercase ( self : Optional[Any] ): self.config_tester.run_common_tests() def lowercase ( self : Union[str, Any] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ ) def lowercase ( self : int ): _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*snake_case_ ) @unittest.skip(reason="MaskFormer does not use inputs_embeds" ) def lowercase ( self : Any ): pass @unittest.skip(reason="MaskFormer does not have a get_input_embeddings method" ) def lowercase ( self : List[str] ): pass @unittest.skip(reason="MaskFormer is not a generative model" ) def lowercase ( self : 
List[str] ): pass @unittest.skip(reason="MaskFormer does not use token embeddings" ) def lowercase ( self : List[Any] ): pass @require_torch_multi_gpu @unittest.skip( reason="MaskFormer has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def lowercase ( self : Any ): pass @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." ) def lowercase ( self : Union[str, Any] ): pass def lowercase ( self : List[str] ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case_ ) _UpperCAmelCase = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic _UpperCAmelCase = [*signature.parameters.keys()] _UpperCAmelCase = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case_ ) @slow def lowercase ( self : Optional[int] ): for model_name in ["facebook/maskformer-swin-small-coco"]: _UpperCAmelCase = MaskFormerModel.from_pretrained(snake_case_ ) self.assertIsNotNone(snake_case_ ) def lowercase ( self : Optional[int] ): _UpperCAmelCase = (self.model_tester.min_size,) * 2 _UpperCAmelCase = { "pixel_values": torch.randn((2, 3, *size) , device=snake_case_ ), "mask_labels": torch.randn((2, 1_0, *size) , device=snake_case_ ), "class_labels": torch.zeros(2 , 1_0 , device=snake_case_ ).long(), } _UpperCAmelCase = MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(snake_case_ ) _UpperCAmelCase = model(**snake_case_ ) self.assertTrue(outputs.loss is not None ) def lowercase ( self : Dict ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.create_and_check_maskformer_model(snake_case_ , **snake_case_ , output_hidden_states=snake_case_ ) def lowercase ( self : Any ): _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _UpperCAmelCase = model_class(snake_case_ ).to(snake_case_ ) _UpperCAmelCase = model(**snake_case_ , output_attentions=snake_case_ ) self.assertTrue(outputs.attentions is not None ) def lowercase ( self : int ): if not self.model_tester.is_training: return # only MaskFormerForInstanceSegmentation has the loss _UpperCAmelCase = self.all_model_classes[1] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.train() _UpperCAmelCase = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ).loss loss.backward() def lowercase ( self : int ): # only MaskFormerForInstanceSegmentation has the loss _UpperCAmelCase = self.all_model_classes[1] _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs() _UpperCAmelCase = True _UpperCAmelCase = True _UpperCAmelCase = model_class(snake_case_ ) model.to(snake_case_ ) model.train() _UpperCAmelCase = model(snake_case_ , mask_labels=snake_case_ , class_labels=snake_case_ ) _UpperCAmelCase = outputs.encoder_hidden_states[0] encoder_hidden_states.retain_grad() _UpperCAmelCase = outputs.pixel_decoder_hidden_states[0] pixel_decoder_hidden_states.retain_grad() # we requires_grad=True in inputs_embeds (line 2152), the original implementation don't _UpperCAmelCase = outputs.transformer_decoder_hidden_states[0] 
transformer_decoder_hidden_states.retain_grad() _UpperCAmelCase = outputs.attentions[0] attentions.retain_grad() outputs.loss.backward(retain_graph=snake_case_ ) self.assertIsNotNone(encoder_hidden_states.grad ) self.assertIsNotNone(pixel_decoder_hidden_states.grad ) self.assertIsNotNone(transformer_decoder_hidden_states.grad ) self.assertIsNotNone(attentions.grad ) __SCREAMING_SNAKE_CASE :Dict = 1e-4 def UpperCAmelCase_ ( ) -> List[str]: '''simple docstring''' _UpperCAmelCase = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_vision @slow class A_ ( unittest.TestCase ): @cached_property def lowercase ( self : Dict ): return ( MaskFormerImageProcessor.from_pretrained("facebook/maskformer-swin-small-coco" ) if is_vision_available() else None ) def lowercase ( self : List[Any] ): _UpperCAmelCase = MaskFormerModel.from_pretrained("facebook/maskformer-swin-small-coco" ).to(snake_case_ ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ ) _UpperCAmelCase = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _UpperCAmelCase = model(**snake_case_ ) _UpperCAmelCase = torch.tensor( [[-0.0_4_8_2, 0.9_2_2_8, 0.4_9_5_1], [-0.2_5_4_7, 0.8_0_1_7, 0.8_5_2_7], [-0.0_0_6_9, 0.3_3_8_5, -0.0_0_8_9]] ).to(snake_case_ ) self.assertTrue( torch.allclose( outputs.encoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) _UpperCAmelCase = torch.tensor( [[-0.8_4_2_2, -0.8_4_3_4, -0.9_7_1_8], [-1.0_1_4_4, -0.5_5_6_5, -0.4_1_9_5], [-1.0_0_3_8, -0.4_4_8_4, -0.1_9_6_1]] ).to(snake_case_ ) self.assertTrue( torch.allclose( outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) _UpperCAmelCase = torch.tensor( [[0.2_8_5_2, -0.0_1_5_9, 0.9_7_3_5], [0.6_2_5_4, 0.1_8_5_8, 0.8_5_2_9], [-0.0_6_8_0, -0.4_1_1_6, 1.8_4_1_3]] ).to(snake_case_ ) self.assertTrue( torch.allclose( outputs.transformer_decoder_last_hidden_state[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : Tuple ): _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(snake_case_ ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ ) _UpperCAmelCase = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _UpperCAmelCase = model(**snake_case_ ) # masks_queries_logits _UpperCAmelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _UpperCAmelCase = [ [-1.3_7_3_7_1_2_4, -1.7_7_2_4_9_3_7, -1.9_3_6_4_2_3_3], [-1.5_9_7_7_2_8_1, -1.9_8_6_7_9_3_9, -2.1_5_2_3_6_9_5], [-1.5_7_9_5_3_9_8, -1.9_2_6_9_8_3_2, -2.0_9_3_9_4_2], ] _UpperCAmelCase = torch.tensor(snake_case_ ).to(snake_case_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) # class_queries_logits _UpperCAmelCase = outputs.class_queries_logits self.assertEqual( 
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _UpperCAmelCase = torch.tensor( [ [1.6_512e00, -5.2_572e00, -3.3_519e00], [3.6_169e-02, -5.9_025e00, -2.9_313e00], [1.0_766e-04, -7.7_630e00, -5.1_263e00], ] ).to(snake_case_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : int ): _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-resnet101-coco-stuff" ) .to(snake_case_ ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = prepare_img() _UpperCAmelCase = image_processor(snake_case_ , return_tensors="pt" ).to(snake_case_ ) _UpperCAmelCase = inputs["pixel_values"].shape # check size is divisible by 32 self.assertTrue((inputs_shape[-1] % 3_2) == 0 and (inputs_shape[-2] % 3_2) == 0 ) # check size self.assertEqual(snake_case_ , (1, 3, 8_0_0, 1_0_8_8) ) with torch.no_grad(): _UpperCAmelCase = model(**snake_case_ ) # masks_queries_logits _UpperCAmelCase = outputs.masks_queries_logits self.assertEqual( masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , ) _UpperCAmelCase = [[-0.9_0_4_6, -2.6_3_6_6, -4.6_0_6_2], [-3.4_1_7_9, -5.7_8_9_0, -8.8_0_5_7], [-4.9_1_7_9, -7.6_5_6_0, -1_0.7_7_1_1]] _UpperCAmelCase = torch.tensor(snake_case_ ).to(snake_case_ ) self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , snake_case_ , atol=snake_case_ ) ) # class_queries_logits _UpperCAmelCase = outputs.class_queries_logits self.assertEqual( class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) ) _UpperCAmelCase = torch.tensor( [[4.7_1_8_8, -3.2_5_8_5, -2.8_8_5_7], [6.6_8_7_1, -2.9_1_8_1, -1.2_4_8_7], [7.2_4_4_9, -2.2_7_6_4, -2.1_8_7_4]] ).to(snake_case_ ) self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , snake_case_ , atol=snake_case_ ) ) def lowercase ( self : List[Any] ): _UpperCAmelCase = ( MaskFormerForInstanceSegmentation.from_pretrained("facebook/maskformer-swin-small-coco" ) .to(snake_case_ ) .eval() ) _UpperCAmelCase = self.default_image_processor _UpperCAmelCase = image_processor( [np.zeros((3, 8_0_0, 1_3_3_3) ), np.zeros((3, 8_0_0, 1_3_3_3) )] , segmentation_maps=[np.zeros((3_8_4, 3_8_4) ).astype(np.floataa ), np.zeros((3_8_4, 3_8_4) ).astype(np.floataa )] , return_tensors="pt" , ) _UpperCAmelCase = inputs["pixel_values"].to(snake_case_ ) _UpperCAmelCase = [el.to(snake_case_ ) for el in inputs["mask_labels"]] _UpperCAmelCase = [el.to(snake_case_ ) for el in inputs["class_labels"]] with torch.no_grad(): _UpperCAmelCase = model(**snake_case_ ) self.assertTrue(outputs.loss is not None )
22
1
'''simple docstring''' import math import os from copy import deepcopy import datasets import evaluate import torch import transformers from datasets import load_dataset from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer from accelerate import Accelerator from accelerate.test_utils import RegressionDataset, RegressionModel from accelerate.utils import is_tpu_available, set_seed A__ : Dict ='''true''' def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase=82 , lowerCAmelCase=16 ): """simple docstring""" set_seed(42 ) _lowerCAmelCase = RegressionModel() _lowerCAmelCase = deepcopy(_a ) _lowerCAmelCase = RegressionDataset(length=_a ) _lowerCAmelCase = DataLoader(_a , batch_size=_a ) model.to(accelerator.device ) _lowerCAmelCase = accelerator.prepare(_a , _a ) return model, ddp_model, dataloader def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase=False ): """simple docstring""" _lowerCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" ) _lowerCAmelCase = load_dataset("""glue""" , """mrpc""" , split="""validation""" ) def tokenize_function(lowerCAmelCase ): _lowerCAmelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=_a , max_length=_a ) return outputs with accelerator.main_process_first(): _lowerCAmelCase = dataset.map( _a , batched=_a , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) _lowerCAmelCase = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(lowerCAmelCase ): if use_longest: return tokenizer.pad(_a , padding="""longest""" , return_tensors="""pt""" ) return tokenizer.pad(_a , padding="""max_length""" , max_length=1_28 , return_tensors="""pt""" ) return DataLoader(_a , shuffle=_a , collate_fn=_a , batch_size=16 ) def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = Accelerator(dispatch_batches=_a , split_batches=_a ) _lowerCAmelCase = get_dataloader(_a , not dispatch_batches ) _lowerCAmelCase = AutoModelForSequenceClassification.from_pretrained( """hf-internal-testing/mrpc-bert-base-cased""" , return_dict=_a ) _lowerCAmelCase = accelerator.prepare(_a , _a ) return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ): """simple docstring""" _lowerCAmelCase = [] for batch in dataloader: _lowerCAmelCase = batch.values() with torch.no_grad(): _lowerCAmelCase = model(_a ) _lowerCAmelCase = accelerator.gather_for_metrics((logit, target) ) logits_and_targets.append((logit, target) ) _lowerCAmelCase = [], [] for logit, targ in logits_and_targets: logits.append(_a ) targs.append(_a ) _lowerCAmelCase = torch.cat(_a ), torch.cat(_a ) return logits, targs def UpperCamelCase__ ( lowerCAmelCase , lowerCAmelCase=82 , lowerCAmelCase=False , lowerCAmelCase=False , lowerCAmelCase=16 ): """simple docstring""" _lowerCAmelCase = get_basic_setup(_a , _a , _a ) _lowerCAmelCase = generate_predictions(_a , _a , _a ) assert ( len(_a ) == num_samples ), f"Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(_a )}" def UpperCamelCase__ ( lowerCAmelCase = False , lowerCAmelCase = False ): """simple docstring""" _lowerCAmelCase = evaluate.load("""glue""" , """mrpc""" ) _lowerCAmelCase = get_mrpc_setup(_a , _a ) # First do baseline _lowerCAmelCase = setup["""no"""] model.to(_a ) model.eval() for batch in dataloader: batch.to(_a ) with 
torch.inference_mode(): _lowerCAmelCase = model(**_a ) _lowerCAmelCase = outputs.logits.argmax(dim=-1 ) metric.add_batch(predictions=_a , references=batch["""labels"""] ) _lowerCAmelCase = metric.compute() # Then do distributed _lowerCAmelCase = setup["""ddp"""] model.eval() for batch in dataloader: with torch.inference_mode(): _lowerCAmelCase = model(**_a ) _lowerCAmelCase = outputs.logits.argmax(dim=-1 ) _lowerCAmelCase = batch["""labels"""] _lowerCAmelCase = accelerator.gather_for_metrics((preds, references) ) metric.add_batch(predictions=_a , references=_a ) _lowerCAmelCase = metric.compute() for key in "accuracy f1".split(): assert math.isclose( baseline[key] , distributed[key] ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n" def UpperCamelCase__ ( ): """simple docstring""" _lowerCAmelCase = Accelerator(split_batches=_a , dispatch_batches=_a ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_warning() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() # These are a bit slower so they should only be ran on the GPU or TPU if torch.cuda.is_available() or is_tpu_available(): if accelerator.is_local_main_process: print("""**Testing gather_for_metrics**""" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: if accelerator.is_local_main_process: print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`" ) test_mrpc(_a , _a ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("""**Test torch metrics**""" ) for split_batches in [True, False]: for dispatch_batches in [True, False]: _lowerCAmelCase = Accelerator(split_batches=_a , dispatch_batches=_a ) if accelerator.is_local_main_process: print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99" ) test_torch_metrics(_a , 99 ) accelerator.state._reset_state() if accelerator.is_local_main_process: print("""**Test last batch is not dropped when perfectly divisible**""" ) _lowerCAmelCase = Accelerator() test_torch_metrics(_a , 5_12 ) accelerator.state._reset_state() def UpperCamelCase__ ( lowerCAmelCase ): """simple docstring""" main() if __name__ == "__main__": main()
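Distilled from the test file above, a hedged sketch of the `gather_for_metrics` pattern it exercises (the linear model and random data are stand-ins, not part of the tests):

# Sketch of the gather_for_metrics pattern, assuming only torch and accelerate.
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator

accelerator = Accelerator()
model = torch.nn.Linear(1, 1)
loader = DataLoader(TensorDataset(torch.randn(82, 1), torch.randn(82, 1)), batch_size=16)
model, loader = accelerator.prepare(model, loader)

preds, targets = [], []
for xb, yb in loader:
    with torch.no_grad():
        out = model(xb)
    # gather_for_metrics drops the duplicate samples that padding adds to the last batch
    out, yb = accelerator.gather_for_metrics((out, yb))
    preds.append(out)
    targets.append(yb)
print(torch.cat(preds).shape)  # torch.Size([82, 1]) on every process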
365
'''simple docstring''' import unittest from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available from transformers.pipelines import pipeline from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_tf, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class UpperCAmelCase : @staticmethod def lowercase__ ( *__snake_case : Optional[Any] , **__snake_case : Any ) -> Tuple: pass @is_pipeline_test @require_torch @require_vision class UpperCAmelCase ( unittest.TestCase ): _lowercase: Union[str, Any] = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING def lowercase__ ( self : List[str] , __snake_case : Optional[Any] , __snake_case : str , __snake_case : List[str] ) -> int: _lowerCAmelCase = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" ) _lowerCAmelCase = [ { """image""": Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ), """question""": """How many cats are there?""", }, { """image""": """./tests/fixtures/tests_samples/COCO/000000039769.png""", """question""": """How many cats are there?""", }, ] return vqa_pipeline, examples def lowercase__ ( self : Any , __snake_case : List[Any] , __snake_case : List[Any] ) -> Union[str, Any]: _lowerCAmelCase = vqa_pipeline(__snake_case , top_k=1 ) self.assertEqual( __snake_case , [ [{"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}], [{"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}], ] , ) @require_torch def lowercase__ ( self : str ) -> int: _lowerCAmelCase = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" ) _lowerCAmelCase = """./tests/fixtures/tests_samples/COCO/000000039769.png""" _lowerCAmelCase = """How many cats are there?""" _lowerCAmelCase = vqa_pipeline(image=__snake_case , question="""How many cats are there?""" , top_k=2 ) self.assertEqual( __snake_case , [{"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}, {"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}] ) _lowerCAmelCase = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 ) self.assertEqual( __snake_case , [{"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}, {"""score""": ANY(__snake_case ), """answer""": ANY(__snake_case )}] ) @slow @require_torch def lowercase__ ( self : List[Any] ) -> List[str]: _lowerCAmelCase = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""" ) _lowerCAmelCase = """./tests/fixtures/tests_samples/COCO/000000039769.png""" _lowerCAmelCase = """How many cats are there?""" _lowerCAmelCase = vqa_pipeline(image=__snake_case , question=__snake_case , top_k=2 ) self.assertEqual( nested_simplify(__snake_case , decimals=4 ) , [{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}] ) _lowerCAmelCase = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 ) self.assertEqual( nested_simplify(__snake_case , decimals=4 ) , [{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}] ) _lowerCAmelCase = vqa_pipeline( [{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 ) self.assertEqual( nested_simplify(__snake_case , decimals=4 ) , [[{"""score""": 0.87_99, """answer""": """2"""}, {"""score""": 0.2_96, """answer""": """1"""}]] * 2 , ) @require_tf 
@unittest.skip("""Visual question answering not implemented in TF""" ) def lowercase__ ( self : List[str] ) -> Union[str, Any]: pass
220
0
'''simple docstring''' import gc import unittest import numpy as np import torch from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionPanoramaPipeline, UNetaDConditionModel, ) from diffusers.utils import slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() @skip_mps class lowercase ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ): """simple docstring""" UpperCAmelCase = StableDiffusionPanoramaPipeline UpperCAmelCase = TEXT_TO_IMAGE_PARAMS UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS def _snake_case ( self ) -> List[str]: torch.manual_seed(0 ) _UpperCAmelCase : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) ,layers_per_block=1 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,) _UpperCAmelCase : Tuple = DDIMScheduler() torch.manual_seed(0 ) _UpperCAmelCase : Tuple = AutoencoderKL( block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,) torch.manual_seed(0 ) _UpperCAmelCase : Dict = CLIPTextConfig( bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1E-0_5 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1_000 ,) _UpperCAmelCase : Any = CLIPTextModel(a_ ) _UpperCAmelCase : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" ) _UpperCAmelCase : int = { """unet""": unet, """scheduler""": scheduler, """vae""": vae, """text_encoder""": text_encoder, """tokenizer""": tokenizer, """safety_checker""": None, """feature_extractor""": None, } return components def _snake_case ( self ,a_ ,a_=0 ) -> List[str]: _UpperCAmelCase : List[str] = torch.manual_seed(a_ ) _UpperCAmelCase : Optional[int] = { """prompt""": """a photo of the dolomites""", """generator""": generator, # Setting height and width to None to prevent OOMs on CPU. 
"""height""": None, """width""": None, """num_inference_steps""": 1, """guidance_scale""": 6.0, """output_type""": """numpy""", } return inputs def _snake_case ( self ) -> Optional[Any]: _UpperCAmelCase : str = """cpu""" # ensure determinism for the device-dependent torch.Generator _UpperCAmelCase : List[str] = self.get_dummy_components() _UpperCAmelCase : List[str] = StableDiffusionPanoramaPipeline(**a_ ) _UpperCAmelCase : Optional[int] = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) _UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(a_ ) _UpperCAmelCase : Tuple = sd_pipe(**a_ ).images _UpperCAmelCase : Union[str, Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _UpperCAmelCase : Optional[int] = np.array([0.6186, 0.5374, 0.4915, 0.4135, 0.4114, 0.4563, 0.5128, 0.4977, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _snake_case ( self ) -> Tuple: super().test_inference_batch_consistent(batch_sizes=[1, 2] ) def _snake_case ( self ) -> Tuple: super().test_inference_batch_single_identical(batch_size=2 ,expected_max_diff=3.2_5E-3 ) def _snake_case ( self ) -> Optional[Any]: _UpperCAmelCase : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator _UpperCAmelCase : List[str] = self.get_dummy_components() _UpperCAmelCase : int = StableDiffusionPanoramaPipeline(**a_ ) _UpperCAmelCase : Optional[int] = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) _UpperCAmelCase : Any = self.get_dummy_inputs(a_ ) _UpperCAmelCase : Dict = """french fries""" _UpperCAmelCase : Any = sd_pipe(**a_ ,negative_prompt=a_ ) _UpperCAmelCase : Optional[int] = output.images _UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _UpperCAmelCase : Dict = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _snake_case ( self ) -> List[str]: _UpperCAmelCase : Union[str, Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator _UpperCAmelCase : Optional[int] = self.get_dummy_components() _UpperCAmelCase : List[Any] = StableDiffusionPanoramaPipeline(**a_ ) _UpperCAmelCase : List[Any] = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) _UpperCAmelCase : Optional[int] = self.get_dummy_inputs(a_ ) _UpperCAmelCase : int = sd_pipe(**a_ ,view_batch_size=2 ) _UpperCAmelCase : Optional[Any] = output.images _UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _UpperCAmelCase : int = np.array([0.6187, 0.5375, 0.4915, 0.4136, 0.4114, 0.4563, 0.5128, 0.4976, 0.4757] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _snake_case ( self ) -> Any: _UpperCAmelCase : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator _UpperCAmelCase : int = self.get_dummy_components() _UpperCAmelCase : str = EulerAncestralDiscreteScheduler( beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ) _UpperCAmelCase : Dict = StableDiffusionPanoramaPipeline(**a_ ) _UpperCAmelCase : Optional[Any] = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) _UpperCAmelCase : Union[str, Any] = self.get_dummy_inputs(a_ ) _UpperCAmelCase : List[str] = sd_pipe(**a_ ).images _UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _UpperCAmelCase : Tuple = np.array([0.4024, 0.6510, 0.4901, 0.5378, 0.5813, 0.5622, 0.4795, 0.4467, 0.4952] ) 
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 def _snake_case ( self ) -> Optional[int]: _UpperCAmelCase : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator _UpperCAmelCase : Any = self.get_dummy_components() _UpperCAmelCase : List[str] = PNDMScheduler( beta_start=0.0_0085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,skip_prk_steps=a_ ) _UpperCAmelCase : Tuple = StableDiffusionPanoramaPipeline(**a_ ) _UpperCAmelCase : Union[str, Any] = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) _UpperCAmelCase : str = self.get_dummy_inputs(a_ ) _UpperCAmelCase : Any = sd_pipe(**a_ ).images _UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 64, 64, 3) _UpperCAmelCase : Optional[Any] = np.array([0.6391, 0.6291, 0.4861, 0.5134, 0.5552, 0.4578, 0.5032, 0.5023, 0.4539] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 @slow @require_torch_gpu class lowercase ( unittest.TestCase ): """simple docstring""" def _snake_case ( self ) -> Union[str, Any]: super().tearDown() gc.collect() torch.cuda.empty_cache() def _snake_case ( self ,a_=0 ) -> Any: _UpperCAmelCase : Optional[Any] = torch.manual_seed(a_ ) _UpperCAmelCase : str = { """prompt""": """a photo of the dolomites""", """generator""": generator, """num_inference_steps""": 3, """guidance_scale""": 7.5, """output_type""": """numpy""", } return inputs def _snake_case ( self ) -> List[Any]: _UpperCAmelCase : Optional[Any] = """stabilityai/stable-diffusion-2-base""" _UpperCAmelCase : List[str] = DDIMScheduler.from_pretrained(a_ ,subfolder="""scheduler""" ) _UpperCAmelCase : List[Any] = StableDiffusionPanoramaPipeline.from_pretrained(a_ ,scheduler=a_ ,safety_checker=a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() _UpperCAmelCase : Tuple = self.get_inputs() _UpperCAmelCase : Union[str, Any] = pipe(**a_ ).images _UpperCAmelCase : List[Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2_048, 3) _UpperCAmelCase : Dict = np.array( [ 0.3696_8392, 0.2702_5372, 0.3244_6766, 0.2837_9387, 0.3636_3274, 0.3073_3347, 0.2710_0027, 0.2705_4125, 0.2553_6096, ] ) assert np.abs(expected_slice - image_slice ).max() < 1E-2 def _snake_case ( self ) -> Tuple: _UpperCAmelCase : List[str] = StableDiffusionPanoramaPipeline.from_pretrained( """stabilityai/stable-diffusion-2-base""" ,safety_checker=a_ ) _UpperCAmelCase : Union[str, Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() _UpperCAmelCase : Optional[int] = self.get_inputs() _UpperCAmelCase : Union[str, Any] = pipe(**a_ ).images _UpperCAmelCase : Optional[Any] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 2_048, 3) _UpperCAmelCase : Dict = np.array( [ [ 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, ] ] ) assert np.abs(expected_slice - image_slice ).max() < 1E-3 def _snake_case ( self ) -> Dict: _UpperCAmelCase : List[Any] = 0 def callback_fn(a_ ,a_ ,a_ ) -> None: _UpperCAmelCase : Optional[int] = True nonlocal number_of_steps number_of_steps += 1 if step == 1: _UpperCAmelCase : List[Any] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) _UpperCAmelCase : Any = latents[0, -3:, -3:, -1] _UpperCAmelCase : Optional[Any] = np.array( [ 0.1868_1869, 0.3390_7816, 0.536_1276, 0.1443_2865, -0.0285_6611, -0.7394_1123, 0.2339_7987, 0.4732_2682, -0.3782_3164, ] ) assert np.abs(latents_slice.flatten() - 
expected_slice ).max() < 5E-2 elif step == 2: _UpperCAmelCase : Any = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 256) _UpperCAmelCase : Tuple = latents[0, -3:, -3:, -1] _UpperCAmelCase : Union[str, Any] = np.array( [ 0.1853_9645, 0.3398_7248, 0.537_8559, 0.1443_7142, -0.0245_5261, -0.733_8317, 0.2399_0755, 0.4735_6272, -0.378_6505, ] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2 _UpperCAmelCase : Dict = False _UpperCAmelCase : Tuple = """stabilityai/stable-diffusion-2-base""" _UpperCAmelCase : List[str] = DDIMScheduler.from_pretrained(a_ ,subfolder="""scheduler""" ) _UpperCAmelCase : Tuple = StableDiffusionPanoramaPipeline.from_pretrained(a_ ,scheduler=a_ ,safety_checker=a_ ) _UpperCAmelCase : Optional[Any] = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() _UpperCAmelCase : int = self.get_inputs() pipe(**a_ ,callback=a_ ,callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def _snake_case ( self ) -> Any: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() _UpperCAmelCase : Tuple = """stabilityai/stable-diffusion-2-base""" _UpperCAmelCase : Tuple = DDIMScheduler.from_pretrained(a_ ,subfolder="""scheduler""" ) _UpperCAmelCase : Optional[Any] = StableDiffusionPanoramaPipeline.from_pretrained(a_ ,scheduler=a_ ,safety_checker=a_ ) _UpperCAmelCase : Dict = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() _UpperCAmelCase : int = self.get_inputs() _UpperCAmelCase : List[str] = pipe(**a_ ) _UpperCAmelCase : List[Any] = torch.cuda.max_memory_allocated() # make sure that less than 5.2 GB is allocated assert mem_bytes < 5.5 * 10**9
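The slow tests above reduce to this hedged inference sketch for the panorama pipeline (checkpoint name taken from the tests; a CUDA device and downloaded weights are assumed, and the output filename is arbitrary):

from diffusers import DDIMScheduler, StableDiffusionPanoramaPipeline

model_id = "stabilityai/stable-diffusion-2-base"
scheduler = DDIMScheduler.from_pretrained(model_id, subfolder="scheduler")
pipe = StableDiffusionPanoramaPipeline.from_pretrained(model_id, scheduler=scheduler, safety_checker=None)
pipe = pipe.to("cuda")

image = pipe("a photo of the dolomites", num_inference_steps=30, guidance_scale=7.5).images[0]
image.save("panorama.png")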
215
# using dfs for finding eulerian path traversal
def dfs(u, graph, visited_edge, path=None):
    path = (path or []) + [u]
    for v in graph[u]:
        if visited_edge[u][v] is False:
            visited_edge[u][v], visited_edge[v][u] = True, True
            path = dfs(v, graph, visited_edge, path)
    return path


# classify the graph: 1 -> Euler cycle, 2 -> Euler path, 3 -> neither
def check_circuit_or_path(graph, max_node):
    odd_degree_nodes = 0
    odd_node = -1
    for i in range(max_node):
        if i not in graph.keys():
            continue
        if len(graph[i]) % 2 == 1:
            odd_degree_nodes += 1
            odd_node = i
    if odd_degree_nodes == 0:
        return 1, odd_node
    if odd_degree_nodes == 2:
        return 2, odd_node
    return 3, odd_node


def check_euler(graph, max_node):
    visited_edge = [[False for _ in range(max_node + 1)] for _ in range(max_node + 1)]
    check, odd_node = check_circuit_or_path(graph, max_node)
    if check == 3:
        print("graph is not Eulerian")
        print("no path")
        return
    start_node = 1
    if check == 2:
        start_node = odd_node
        print("graph has an Euler path")
    if check == 1:
        print("graph has an Euler cycle")
    path = dfs(start_node, graph, visited_edge)
    print(path)


def main():
    g1 = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
    g2 = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
    g3 = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
    g4 = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
    g5 = {
        1: [],
        2: [],  # all degrees are zero
    }
    max_node = 10
    check_euler(g1, max_node)
    check_euler(g2, max_node)
    check_euler(g3, max_node)
    check_euler(g4, max_node)
    check_euler(g5, max_node)


if __name__ == "__main__":
    main()
215
1
'''XGBoost classifier on the iris dataset with a normalized confusion-matrix plot.'''

import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier


def data_handling(data: dict) -> tuple:
    # split a scikit-learn Bunch into (features, targets)
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
114
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig", "IBertOnnxConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_ibert"] = [
        "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "IBertForMaskedLM",
        "IBertForMultipleChoice",
        "IBertForQuestionAnswering",
        "IBertForSequenceClassification",
        "IBertForTokenClassification",
        "IBertModel",
        "IBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig, IBertOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_ibert import (
            IBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            IBertForMaskedLM,
            IBertForMultipleChoice,
            IBertForQuestionAnswering,
            IBertForSequenceClassification,
            IBertForTokenClassification,
            IBertModel,
            IBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
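What the lazy import structure above buys a downstream user, as a short sketch (the config values are arbitrary and torch is assumed installed so the model symbols resolve):

from transformers import IBertConfig, IBertModel

config = IBertConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=4, intermediate_size=128)
model = IBertModel(config)
print(sum(p.numel() for p in model.parameters()))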
114
1
import pytest


DATASET_LOADING_SCRIPT_NAME = "__dummy_dataset1__"

DATASET_LOADING_SCRIPT_CODE = """
import json
import os

import datasets


REPO_URL = "https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/"
URLS = {"train": REPO_URL + "wikiann-bn-train.jsonl", "validation": REPO_URL + "wikiann-bn-validation.jsonl"}


class __DummyDataset1__(datasets.GeneratorBasedBuilder):

    def _info(self):
        features = datasets.Features(
            {
                "tokens": datasets.Sequence(datasets.Value("string")),
                "ner_tags": datasets.Sequence(
                    datasets.features.ClassLabel(
                        names=[
                            "O",
                            "B-PER",
                            "I-PER",
                            "B-ORG",
                            "I-ORG",
                            "B-LOC",
                            "I-LOC",
                        ]
                    )
                ),
                "langs": datasets.Sequence(datasets.Value("string")),
                "spans": datasets.Sequence(datasets.Value("string")),
            }
        )
        return datasets.DatasetInfo(features=features)

    def _split_generators(self, dl_manager):
        dl_path = dl_manager.download(URLS)
        return [
            datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={"filepath": dl_path["train"]}),
            datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={"filepath": dl_path["validation"]}),
        ]

    def _generate_examples(self, filepath):
        with open(filepath, "r", encoding="utf-8") as f:
            for i, line in enumerate(f):
                yield i, json.loads(line)
"""


@pytest.fixture
def dataset_loading_script_name():
    return DATASET_LOADING_SCRIPT_NAME


@pytest.fixture
def dataset_loading_script_code():
    return DATASET_LOADING_SCRIPT_CODE


@pytest.fixture
def dataset_loading_script_dir(dataset_loading_script_name, dataset_loading_script_code, tmp_path):
    script_name = dataset_loading_script_name
    script_dir = tmp_path / "datasets" / script_name
    script_dir.mkdir(parents=True)
    script_path = script_dir / f"{script_name}.py"
    with open(script_path, "w") as f:
        f.write(dataset_loading_script_code)
    return str(script_dir)
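A sketch of a test consuming the fixtures above (the assertion mirrors what the `dataset_loading_script_dir` fixture writes; no network access is needed):

import os

def test_dataset_script_is_materialized(dataset_loading_script_dir, dataset_loading_script_name):
    # the fixture creates datasets/<script_name>/<script_name>.py under tmp_path
    script_path = os.path.join(dataset_loading_script_dir, dataset_loading_script_name + ".py")
    assert os.path.isfile(script_path)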
262
"""simple docstring""" import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class _lowerCAmelCase ( unittest.TestCase ): __UpperCAmelCase : Union[str, Any] = JukeboxTokenizer __UpperCAmelCase : Union[str, Any] = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def lowerCamelCase ( self ) -> int: '''simple docstring''' import torch snake_case : Any = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics" ) snake_case : Optional[Any] = tokenizer(**self.metas )["input_ids"] # fmt: off snake_case : Optional[int] = [ torch.tensor([[ 0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 
35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), torch.tensor([[0, 0, 0, 1069, 11]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) ) @require_torch def lowerCamelCase ( self ) -> Any: '''simple docstring''' import torch snake_case : Tuple = JukeboxTokenizer.from_pretrained("openai/jukebox-5b-lyrics" ) snake_case : Optional[Any] = tokenizer(**self.metas )["input_ids"] # fmt: off snake_case : List[Any] = [ torch.tensor([[ 0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 
64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) ) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) ) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
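For orientation, a minimal usage sketch of the tokenizer outside the test harness above (a hedged sketch: it assumes the `openai/jukebox-1b-lyrics` tokenizer files can be downloaded):

# Hedged usage sketch: requires network access for the pretrained files.
from transformers import JukeboxTokenizer

tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
inputs = tokenizer(artist="Zac Brown Band", genres="Country", lyrics="I met a traveller from an antique land")
# "input_ids" holds one token tensor per prior level, coarsest first.
for ids in inputs["input_ids"]:
    print(ids.shape)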
203
0
"""simple docstring""" from collections import namedtuple import requests from lxml import html # type: ignore _lowerCAmelCase : Optional[int] = namedtuple("covid_data", "cases deaths recovered") def __snake_case ( SCREAMING_SNAKE_CASE__ : str = "https://www.worldometers.info/coronavirus/" ) -> covid_data: '''simple docstring''' _UpperCAmelCase : Optional[Any] = "//div[@class = \"maincounter-number\"]/span/text()" return covid_data(*html.fromstring(requests.get(SCREAMING_SNAKE_CASE__ ).content ).xpath(SCREAMING_SNAKE_CASE__ ) ) _lowerCAmelCase : List[Any] = "Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}" print(fmt.format(*covid_stats()))
202
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from .tokenization_electra import ElectraTokenizer _lowerCAmelCase : List[str] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} _lowerCAmelCase : int = { "vocab_file": { "google/electra-small-generator": ( "https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt" ), "google/electra-base-generator": "https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt", "google/electra-large-generator": ( "https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt" ), "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt" ), }, "tokenizer_file": { "google/electra-small-generator": ( "https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json" ), "google/electra-base-generator": ( "https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json" ), "google/electra-large-generator": ( "https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json" ), "google/electra-small-discriminator": ( "https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json" ), "google/electra-base-discriminator": ( "https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json" ), "google/electra-large-discriminator": ( "https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json" ), }, } _lowerCAmelCase : List[Any] = { "google/electra-small-generator": 5_12, "google/electra-base-generator": 5_12, "google/electra-large-generator": 5_12, "google/electra-small-discriminator": 5_12, "google/electra-base-discriminator": 5_12, "google/electra-large-discriminator": 5_12, } _lowerCAmelCase : Optional[Any] = { "google/electra-small-generator": {"do_lower_case": True}, "google/electra-base-generator": {"do_lower_case": True}, "google/electra-large-generator": {"do_lower_case": True}, "google/electra-small-discriminator": {"do_lower_case": True}, "google/electra-base-discriminator": {"do_lower_case": True}, "google/electra-large-discriminator": {"do_lower_case": True}, } class UpperCAmelCase_ ( _UpperCamelCase ): __SCREAMING_SNAKE_CASE : Optional[Any] = VOCAB_FILES_NAMES __SCREAMING_SNAKE_CASE : Any = PRETRAINED_VOCAB_FILES_MAP __SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_INIT_CONFIGURATION __SCREAMING_SNAKE_CASE : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES __SCREAMING_SNAKE_CASE : Tuple = ElectraTokenizer def __init__( self : Dict , A : Dict=None , A : Optional[int]=None , A : Dict=True , A : Optional[Any]="[UNK]" , A : Any="[SEP]" , A : str="[PAD]" , A : Tuple="[CLS]" , A : Optional[Any]="[MASK]" , A : Any=True , A : Tuple=None , **A : Any , ): super().__init__( A , tokenizer_file=A , do_lower_case=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , tokenize_chinese_chars=A , strip_accents=A , **A , ) _UpperCAmelCase : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , A ) != do_lower_case or normalizer_state.get("strip_accents" , A ) != strip_accents 
or normalizer_state.get("handle_chinese_chars" , A ) != tokenize_chinese_chars ): _UpperCAmelCase : Union[str, Any] = getattr(A , normalizer_state.pop("type" ) ) _UpperCAmelCase : Dict = do_lower_case _UpperCAmelCase : Optional[int] = strip_accents _UpperCAmelCase : Any = tokenize_chinese_chars _UpperCAmelCase : Optional[Any] = normalizer_class(**A ) _UpperCAmelCase : int = do_lower_case def snake_case_ ( self : Tuple , A : str , A : int=None ): _UpperCAmelCase : Dict = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def snake_case_ ( self : Any , A : List[int] , A : Optional[List[int]] = None ): _UpperCAmelCase : Any = [self.sep_token_id] _UpperCAmelCase : Dict = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def snake_case_ ( self : Any , A : str , A : Optional[str] = None ): _UpperCAmelCase : List[Any] = self._tokenizer.model.save(A , name=A ) return tuple(A )
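A minimal sketch of the fast tokenizer in use (assumes the `google/electra-small-discriminator` files are reachable):

from transformers import ElectraTokenizerFast

tok = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
enc = tok("Hello world", "How are you?")
# Pair encoding: [CLS] sentence A [SEP] sentence B [SEP], with token_type_ids 0/1.
print(tok.convert_ids_to_tokens(enc["input_ids"]))
print(enc["token_type_ids"])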
202
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available A_ : List[str] = {"configuration_yolos": ["YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP", "YolosConfig", "YolosOnnxConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : List[Any] = ["YolosFeatureExtractor"] A_ : Optional[int] = ["YolosImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: A_ : str = [ "YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST", "YolosForObjectDetection", "YolosModel", "YolosPreTrainedModel", ] if TYPE_CHECKING: from .configuration_yolos import YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP, YolosConfig, YolosOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_yolos import YolosFeatureExtractor from .image_processing_yolos import YolosImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_yolos import ( YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST, YolosForObjectDetection, YolosModel, YolosPreTrainedModel, ) else: import sys A_ : List[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
165
"""simple docstring""" from __future__ import annotations from collections import namedtuple def A ( snake_case__ , snake_case__ , snake_case__ ): '''simple docstring''' SCREAMING_SNAKE_CASE__ = namedtuple("""result""" , """name value""" ) if (voltage, current, power).count(0 ) != 1: raise ValueError("""Only one argument must be 0""" ) elif power < 0: raise ValueError( """Power cannot be negative in any electrical/electronics system""" ) elif voltage == 0: return result("""voltage""" , power / current ) elif current == 0: return result("""current""" , power / voltage ) elif power == 0: return result("""power""" , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError("""Exactly one argument must be 0""" ) if __name__ == "__main__": import doctest doctest.testmod()
165
1
import warnings
from functools import wraps
from typing import Callable


def experimental(fn: Callable) -> Callable:
    """Decorator that warns that the wrapped callable is experimental."""

    @wraps(fn)
    def _inner_fn(*args, **kwargs):
        warnings.warn(
            f"'{fn.__name__}' is experimental and might be subject to breaking changes in the future.",
            UserWarning,
        )
        return fn(*args, **kwargs)

    return _inner_fn
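A small usage example: the wrapped callable warns on every call but otherwise behaves normally.

import warnings


@experimental
def add(a: int, b: int) -> int:
    return a + b


with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    assert add(1, 2) == 3
    print(caught[0].message)  # 'add' is experimental and might be subject to ...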
352
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class CLIPSegProcessor(ProcessorMixin):
    """Wraps a ViT image processor and a CLIP tokenizer into one CLIPSeg processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
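A usage sketch of the public processor this class corresponds to (assumes the `CIDAS/clipseg-rd64-refined` checkpoint is downloadable):

import numpy as np
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.fromarray(np.zeros((64, 64, 3), dtype=np.uint8))
enc = processor(text=["a cat"], images=image, return_tensors="pt")
print(enc["input_ids"].shape, enc["pixel_values"].shape)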
71
0
'''simple docstring''' from typing import Dict, Iterable, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, logging _SCREAMING_SNAKE_CASE = logging.get_logger(__name__) class lowerCAmelCase_ ( __magic_name__ ): __lowerCamelCase : Union[str, Any] = ["pixel_values"] def __init__( self , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = True , _lowerCAmelCase = None , _lowerCAmelCase = True , _lowerCAmelCase = 1 / 255 , _lowerCAmelCase = True , _lowerCAmelCase = IMAGENET_DEFAULT_MEAN , _lowerCAmelCase = IMAGENET_DEFAULT_STD , **_lowerCAmelCase , ) -> None: super().__init__(**_lowerCAmelCase ) _lowerCAmelCase = size if size is not None else {"shortest_edge": 224} _lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase ) _lowerCAmelCase = crop_size if crop_size is not None else {"height": 224, "width": 224} _lowerCAmelCase = get_size_dict(_lowerCAmelCase , param_name="crop_size" ) _lowerCAmelCase = do_resize _lowerCAmelCase = size _lowerCAmelCase = resample _lowerCAmelCase = do_center_crop _lowerCAmelCase = crop_size _lowerCAmelCase = do_rescale _lowerCAmelCase = rescale_factor _lowerCAmelCase = do_normalize _lowerCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN _lowerCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = PILImageResampling.BICUBIC , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: _lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase ) # size_dict is a dict with either keys "height" and "width" or "shortest_edge" if "shortest_edge" in size: _lowerCAmelCase = int((256 / 224) * size["shortest_edge"] ) _lowerCAmelCase = get_resize_output_image_size(_lowerCAmelCase , size=_lowerCAmelCase , default_to_square=_lowerCAmelCase ) _lowerCAmelCase = {"height": output_size[0], "width": output_size[1]} if "height" not in size_dict or "width" not in size_dict: raise ValueError( f'''Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}''' ) return resize( _lowerCAmelCase , size=(size_dict["height"], size_dict["width"]) , resample=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: _lowerCAmelCase = get_size_dict(_lowerCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(f'''Size dict must have keys \'height\' and \'width\'. 
Got {size.keys()}''' ) return center_crop(_lowerCAmelCase , size=(size["height"], size["width"]) , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: return rescale(_lowerCAmelCase , scale=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = None , **_lowerCAmelCase , ) -> np.ndarray: return normalize(_lowerCAmelCase , mean=_lowerCAmelCase , std=_lowerCAmelCase , data_format=_lowerCAmelCase , **_lowerCAmelCase ) def _snake_case ( self , _lowerCAmelCase , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = None , _lowerCAmelCase = ChannelDimension.FIRST , **_lowerCAmelCase , ) -> BatchFeature: _lowerCAmelCase = do_resize if do_resize is not None else self.do_resize _lowerCAmelCase = resample if resample is not None else self.resample _lowerCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop _lowerCAmelCase = do_rescale if do_rescale is not None else self.do_rescale _lowerCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor _lowerCAmelCase = do_normalize if do_normalize is not None else self.do_normalize _lowerCAmelCase = image_mean if image_mean is not None else self.image_mean _lowerCAmelCase = image_std if image_std is not None else self.image_std _lowerCAmelCase = size if size is not None else self.size _lowerCAmelCase = get_size_dict(_lowerCAmelCase , default_to_square=_lowerCAmelCase ) _lowerCAmelCase = crop_size if crop_size is not None else self.crop_size _lowerCAmelCase = get_size_dict(_lowerCAmelCase , param_name="crop_size" ) _lowerCAmelCase = make_list_of_images(_lowerCAmelCase ) if not valid_images(_lowerCAmelCase ): raise ValueError( "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, " "torch.Tensor, tf.Tensor or jax.ndarray." ) if do_resize and size is None: raise ValueError("Size must be specified if do_resize is True." ) if do_center_crop and crop_size is None: raise ValueError("Crop size must be specified if do_center_crop is True." ) if do_rescale and rescale_factor is None: raise ValueError("Rescale factor must be specified if do_rescale is True." ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("Image mean and std must be specified if do_normalize is True." ) # All transformations expect numpy arrays. _lowerCAmelCase = [to_numpy_array(_lowerCAmelCase ) for image in images] if do_resize: _lowerCAmelCase = [self.resize(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for image in images] if do_center_crop: _lowerCAmelCase = [self.center_crop(_lowerCAmelCase , _lowerCAmelCase ) for image in images] if do_rescale: _lowerCAmelCase = [self.rescale(_lowerCAmelCase , _lowerCAmelCase ) for image in images] if do_normalize: _lowerCAmelCase = [self.normalize(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) for image in images] _lowerCAmelCase = [to_channel_dimension_format(_lowerCAmelCase , _lowerCAmelCase ) for image in images] _lowerCAmelCase = {"pixel_values": images} return BatchFeature(data=_lowerCAmelCase , tensor_type=_lowerCAmelCase )
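The resize/crop pipeline above appears to match transformers' `LevitImageProcessor` (an inference from the 256/224 shortest-edge scaling, not stated in the source); a sketch exercising that public class on random data:

import numpy as np
from transformers import LevitImageProcessor

processor = LevitImageProcessor()
image = np.random.randint(0, 256, (300, 400, 3), dtype=np.uint8)  # H x W x C
batch = processor(image, return_tensors="np")
print(batch["pixel_values"].shape)  # expected (1, 3, 224, 224) after resize + center crop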
158
'''simple docstring'''


def gcd(a: int, b: int) -> int:
    while a != 0:
        a, b = b % a, a
    return b


def find_mod_inverse(a: int, m: int) -> int:
    """Return the modular inverse of a mod m via the extended Euclidean algorithm."""
    if gcd(a, m) != 1:
        raise ValueError(f"mod inverse of {a!r} and {m!r} does not exist")
    u1, u2, u3 = 1, 0, a
    v1, v2, v3 = 0, 1, m
    while v3 != 0:
        q = u3 // v3
        u1, u2, u3, v1, v2, v3 = (u1 - q * v1), (u2 - q * v2), (u3 - q * v3), v1, v2, v3
    return u1 % m
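A quick check of the inverse property, (a * find_mod_inverse(a, m)) % m == 1:

inv = find_mod_inverse(7, 26)
print(inv, (7 * inv) % 26)  # 15 1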
158
1
'''simple docstring''' from typing import Dict, List, Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict from ...image_transforms import ( center_crop, convert_to_rgb, get_resize_output_image_size, normalize, rescale, resize, to_channel_dimension_format, ) from ...image_utils import ( OPENAI_CLIP_MEAN, OPENAI_CLIP_STD, ChannelDimension, ImageInput, PILImageResampling, make_list_of_images, to_numpy_array, valid_images, ) from ...utils import TensorType, is_vision_available, logging _A : Any =logging.get_logger(__name__) if is_vision_available(): import PIL class _lowercase ( _UpperCAmelCase ): a = ["pixel_values"] def __init__( self: Union[str, Any] , UpperCamelCase__: List[Any] = True , UpperCamelCase__: str = None , UpperCamelCase__: Any = PILImageResampling.BICUBIC , UpperCamelCase__: Tuple = True , UpperCamelCase__: List[str] = None , UpperCamelCase__: int = True , UpperCamelCase__: Any = 1 / 255 , UpperCamelCase__: Dict = True , UpperCamelCase__: Dict = None , UpperCamelCase__: List[Any] = None , UpperCamelCase__: List[str] = True , **UpperCamelCase__: Tuple , ): super().__init__(**_UpperCAmelCase ) lowerCamelCase__ : str = size if size is not None else {'''shortest_edge''': 224} lowerCamelCase__ : List[Any] = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) lowerCamelCase__ : List[str] = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224} lowerCamelCase__ : Dict = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase , param_name="""crop_size""" ) lowerCamelCase__ : Optional[Any] = do_resize lowerCamelCase__ : Any = size lowerCamelCase__ : Optional[Any] = resample lowerCamelCase__ : List[Any] = do_center_crop lowerCamelCase__ : Optional[Any] = crop_size lowerCamelCase__ : Optional[Any] = do_rescale lowerCamelCase__ : Any = rescale_factor lowerCamelCase__ : List[Any] = do_normalize lowerCamelCase__ : str = image_mean if image_mean is not None else OPENAI_CLIP_MEAN lowerCamelCase__ : Dict = image_std if image_std is not None else OPENAI_CLIP_STD lowerCamelCase__ : str = do_convert_rgb def lowerCamelCase_ ( self: Optional[Any] , UpperCamelCase__: List[str] , UpperCamelCase__: Tuple , UpperCamelCase__: Union[str, Any] = PILImageResampling.BICUBIC , UpperCamelCase__: Dict = None , **UpperCamelCase__: Optional[Any] , ): lowerCamelCase__ : Tuple = get_size_dict(_UpperCAmelCase , default_to_square=_UpperCAmelCase ) if "shortest_edge" not in size: raise ValueError(F'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' ) lowerCamelCase__ : Dict = get_resize_output_image_size(_UpperCAmelCase , size=size["""shortest_edge"""] , default_to_square=_UpperCAmelCase ) return resize(_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def lowerCamelCase_ ( self: str , UpperCamelCase__: Optional[Any] , UpperCamelCase__: Tuple , UpperCamelCase__: Optional[Any] = None , **UpperCamelCase__: Tuple , ): lowerCamelCase__ : Dict = get_size_dict(_UpperCAmelCase ) if "height" not in size or "width" not in size: raise ValueError(F'''The `size` parameter must contain the keys (height, width). 
Got {size.keys()}''' ) return center_crop(_UpperCAmelCase , size=(size["""height"""], size["""width"""]) , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def lowerCamelCase_ ( self: Any , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: Union[str, Any] , UpperCamelCase__: int = None , **UpperCamelCase__: Optional[int] , ): return rescale(_UpperCAmelCase , scale=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Dict , UpperCamelCase__: Tuple , UpperCamelCase__: Dict , UpperCamelCase__: Any = None , **UpperCamelCase__: Union[str, Any] , ): return normalize(_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase , data_format=_UpperCAmelCase , **_UpperCAmelCase ) def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Dict , UpperCamelCase__: Tuple = None , UpperCamelCase__: Tuple = None , UpperCamelCase__: Union[str, Any] = None , UpperCamelCase__: Any = None , UpperCamelCase__: Any = None , UpperCamelCase__: Union[str, Any] = None , UpperCamelCase__: List[str] = None , UpperCamelCase__: str = None , UpperCamelCase__: Any = None , UpperCamelCase__: Optional[Any] = None , UpperCamelCase__: List[Any] = None , UpperCamelCase__: Optional[Any] = None , UpperCamelCase__: int = ChannelDimension.FIRST , **UpperCamelCase__: List[Any] , ): lowerCamelCase__ : List[Any] = do_resize if do_resize is not None else self.do_resize lowerCamelCase__ : Optional[int] = size if size is not None else self.size lowerCamelCase__ : Optional[int] = get_size_dict(_UpperCAmelCase , param_name="""size""" , default_to_square=_UpperCAmelCase ) lowerCamelCase__ : str = resample if resample is not None else self.resample lowerCamelCase__ : Tuple = do_center_crop if do_center_crop is not None else self.do_center_crop lowerCamelCase__ : List[str] = crop_size if crop_size is not None else self.crop_size lowerCamelCase__ : Dict = get_size_dict(_UpperCAmelCase , param_name="""crop_size""" , default_to_square=_UpperCAmelCase ) lowerCamelCase__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale lowerCamelCase__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor lowerCamelCase__ : str = do_normalize if do_normalize is not None else self.do_normalize lowerCamelCase__ : List[Any] = image_mean if image_mean is not None else self.image_mean lowerCamelCase__ : int = image_std if image_std is not None else self.image_std lowerCamelCase__ : int = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb lowerCamelCase__ : Tuple = make_list_of_images(_UpperCAmelCase ) if not valid_images(_UpperCAmelCase ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_resize and size is None: raise ValueError("""Size must be specified if do_resize is True.""" ) if do_center_crop and crop_size is None: raise ValueError("""Crop size must be specified if do_center_crop is True.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) if do_normalize and (image_mean is None or image_std is None): raise ValueError("""Image mean and std must be specified if do_normalize is True.""" ) # PIL RGBA images are converted to RGB if do_convert_rgb: lowerCamelCase__ : Union[str, Any] = [convert_to_rgb(_UpperCAmelCase ) for image in images] # All transformations expect numpy arrays. 
lowerCamelCase__ : Optional[int] = [to_numpy_array(_UpperCAmelCase ) for image in images] if do_resize: lowerCamelCase__ : str = [self.resize(image=_UpperCAmelCase , size=_UpperCAmelCase , resample=_UpperCAmelCase ) for image in images] if do_center_crop: lowerCamelCase__ : Optional[Any] = [self.center_crop(image=_UpperCAmelCase , size=_UpperCAmelCase ) for image in images] if do_rescale: lowerCamelCase__ : Any = [self.rescale(image=_UpperCAmelCase , scale=_UpperCAmelCase ) for image in images] if do_normalize: lowerCamelCase__ : Optional[Any] = [self.normalize(image=_UpperCAmelCase , mean=_UpperCAmelCase , std=_UpperCAmelCase ) for image in images] lowerCamelCase__ : Tuple = [to_channel_dimension_format(_UpperCAmelCase , _UpperCAmelCase ) for image in images] lowerCamelCase__ : Dict = {'''pixel_values''': images} return BatchFeature(data=_UpperCAmelCase , tensor_type=_UpperCAmelCase )
352
'''simple docstring''' import csv from collections import defaultdict from dataclasses import dataclass, field from typing import List, Optional import matplotlib.pyplot as plt import numpy as np from matplotlib.ticker import ScalarFormatter from transformers import HfArgumentParser def SCREAMING_SNAKE_CASE_ (UpperCamelCase=None , UpperCamelCase=None ) -> Any: return field(default_factory=lambda: default , metadata=UpperCamelCase ) @dataclass class _lowercase : a = field( metadata={"""help""": """The csv file to plot."""} , ) a = field( default=_lowercase , metadata={"""help""": """Whether to plot along batch size or sequence length. Defaults to sequence length."""} , ) a = field( default=_lowercase , metadata={"""help""": """Whether the csv file has time results or memory results. Defaults to memory results."""} , ) a = field( default=_lowercase , metadata={"""help""": """Disable logarithmic scale when plotting"""} , ) a = field( default=_lowercase , metadata={ """help""": """Whether the csv file has training results or inference results. Defaults to inference results.""" } , ) a = field( default=_lowercase , metadata={"""help""": """Filename under which the plot will be saved. If unused no plot is saved."""} , ) a = list_field( default=_lowercase , metadata={"""help""": """List of model names that are used instead of the ones in the csv file."""} ) def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Dict: try: int(UpperCamelCase ) return True except ValueError: return False def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> int: try: float(UpperCamelCase ) return True except ValueError: return False class _lowercase : def __init__( self: Tuple , UpperCamelCase__: str ): lowerCamelCase__ : int = args lowerCamelCase__ : Optional[int] = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} ) with open(self.args.csv_file , newline="""""" ) as csv_file: lowerCamelCase__ : str = csv.DictReader(UpperCamelCase__ ) for row in reader: lowerCamelCase__ : Optional[int] = row["""model"""] self.result_dict[model_name]["bsz"].append(int(row["""batch_size"""] ) ) self.result_dict[model_name]["seq_len"].append(int(row["""sequence_length"""] ) ) if can_convert_to_int(row["""result"""] ): # value is not None lowerCamelCase__ : Tuple = int(row["""result"""] ) elif can_convert_to_float(row["""result"""] ): # value is not None lowerCamelCase__ : Any = float(row["""result"""] ) def lowerCamelCase_ ( self: str ): lowerCamelCase__ , lowerCamelCase__ : Tuple = plt.subplots() lowerCamelCase__ : Any = """Time usage""" if self.args.is_time else """Memory usage""" lowerCamelCase__ : List[str] = title_str + """ for training""" if self.args.is_train else title_str + """ for inference""" if not self.args.no_log_scale: # set logarithm scales ax.set_xscale("""log""" ) ax.set_yscale("""log""" ) for axis in [ax.xaxis, ax.yaxis]: axis.set_major_formatter(ScalarFormatter() ) for model_name_idx, model_name in enumerate(self.result_dict.keys() ): lowerCamelCase__ : Any = sorted(set(self.result_dict[model_name]["""bsz"""] ) ) lowerCamelCase__ : int = sorted(set(self.result_dict[model_name]["""seq_len"""] ) ) lowerCamelCase__ : Any = self.result_dict[model_name]["""result"""] ((lowerCamelCase__) , (lowerCamelCase__)) : Dict = ( (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes) ) lowerCamelCase__ : Any = ( model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx] ) for inner_loop_value in inner_loop_array: if self.args.plot_along_batch: 
lowerCamelCase__ : int = np.asarray( [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=UpperCamelCase__ , ) else: lowerCamelCase__ : List[Any] = np.asarray( [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , ) ((lowerCamelCase__) , (lowerCamelCase__)) : List[str] = ( ("""batch_size""", """len""") if self.args.plot_along_batch else ("""in #tokens""", """bsz""") ) lowerCamelCase__ : int = np.asarray(UpperCamelCase__ , UpperCamelCase__ )[: len(UpperCamelCase__ )] plt.scatter( UpperCamelCase__ , UpperCamelCase__ , label=F'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' ) plt.plot(UpperCamelCase__ , UpperCamelCase__ , """--""" ) title_str += F''' {label_model_name} vs.''' lowerCamelCase__ : Any = title_str[:-4] lowerCamelCase__ : Optional[int] = """Time in s""" if self.args.is_time else """Memory in MB""" # plot plt.title(UpperCamelCase__ ) plt.xlabel(UpperCamelCase__ ) plt.ylabel(UpperCamelCase__ ) plt.legend() if self.args.figure_png_file is not None: plt.savefig(self.args.figure_png_file ) else: plt.show() def SCREAMING_SNAKE_CASE_ () -> str: lowerCamelCase__ : str = HfArgumentParser(UpperCamelCase ) lowerCamelCase__ : str = parser.parse_args_into_dataclasses()[0] lowerCamelCase__ : Any = Plot(args=UpperCamelCase ) plot.plot() if __name__ == "__main__": main()
129
0
import math import time from typing import Dict, List, Optional from torch.utils.data import Dataset from transformers import SeqaSeqTrainer, is_torch_tpu_available from transformers.trainer_utils import PredictionOutput, speed_metrics if is_torch_tpu_available(check_device=False): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met class snake_case__ ( lowerCAmelCase_ ): """simple docstring""" def __init__( self : int, *_snake_case : int, _snake_case : Dict=None, _snake_case : Optional[Any]=None, **_snake_case : Dict ) ->Optional[Any]: super().__init__(*_snake_case, **_snake_case ) snake_case__ : Tuple = eval_examples snake_case__ : Optional[Any] = post_process_function def lowercase_ ( self : Dict, _snake_case : Optional[Dataset] = None, _snake_case : Optional[int]=None, _snake_case : Optional[List[str]] = None, _snake_case : str = "eval", **_snake_case : List[str], ) ->Dict[str, float]: snake_case__ : Optional[int] = gen_kwargs.copy() snake_case__ : List[Any] = ( gen_kwargs['max_length'] if gen_kwargs.get('max_length' ) is not None else self.args.generation_max_length ) snake_case__ : Tuple = ( gen_kwargs['num_beams'] if gen_kwargs.get('num_beams' ) is not None else self.args.generation_num_beams ) snake_case__ : Dict = gen_kwargs snake_case__ : int = self.eval_dataset if eval_dataset is None else eval_dataset snake_case__ : Union[str, Any] = self.get_eval_dataloader(_snake_case ) snake_case__ : Tuple = self.eval_examples if eval_examples is None else eval_examples # Temporarily disable metric computation, we will do it in the loop here. snake_case__ : str = self.compute_metrics snake_case__ : Optional[int] = None snake_case__ : Optional[Any] = time.time() snake_case__ : List[Any] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: snake_case__ : Optional[Any] = eval_loop( _snake_case, description='Evaluation', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=_snake_case, metric_key_prefix=_snake_case, ) finally: snake_case__ : Any = compute_metrics snake_case__ : List[str] = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _snake_case, _snake_case, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size ), ) ) if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save: # Only the main node write the results by default snake_case__ : List[str] = self.post_process_function(_snake_case, _snake_case, _snake_case ) snake_case__ : List[Any] = self.compute_metrics(_snake_case ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): snake_case__ : Union[str, Any] = metrics.pop(_snake_case ) metrics.update(output.metrics ) else: snake_case__ : List[str] = output.metrics if self.args.should_log: # Only the main node log the results by default self.log(_snake_case ) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) 
xm.master_print(met.metrics_report() ) snake_case__ : str = self.callback_handler.on_evaluate(self.args, self.state, self.control, _snake_case ) return metrics def lowercase_ ( self : int, _snake_case : List[Any], _snake_case : Optional[Any], _snake_case : List[Any]=None, _snake_case : str = "test", **_snake_case : List[str] ) ->Any: snake_case__ : int = gen_kwargs.copy() snake_case__ : Any = self.get_test_dataloader(_snake_case ) # Temporarily disable metric computation, we will do it in the loop here. snake_case__ : Optional[Any] = self.compute_metrics snake_case__ : Optional[int] = None snake_case__ : Any = time.time() snake_case__ : int = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop try: snake_case__ : str = eval_loop( _snake_case, description='Prediction', prediction_loss_only=True if compute_metrics is None else None, ignore_keys=_snake_case, metric_key_prefix=_snake_case, ) finally: snake_case__ : Optional[Any] = compute_metrics snake_case__ : str = self.args.eval_batch_size * self.args.world_size if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics: start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time'''] output.metrics.update( speed_metrics( _snake_case, _snake_case, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size ), ) ) if self.post_process_function is None or self.compute_metrics is None: return output snake_case__ : List[Any] = self.post_process_function(_snake_case, _snake_case, _snake_case, 'predict' ) snake_case__ : Optional[Any] = self.compute_metrics(_snake_case ) # Prefix all keys with metric_key_prefix + '_' for key in list(metrics.keys() ): if not key.startswith(F'''{metric_key_prefix}_''' ): snake_case__ : List[Any] = metrics.pop(_snake_case ) metrics.update(output.metrics ) return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=_snake_case )
277
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available


_import_structure = {
    "configuration_longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config", "LongT5OnnxConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_longt5"] = [
        "LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LongT5EncoderModel",
        "LongT5ForConditionalGeneration",
        "LongT5Model",
        "LongT5PreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_longt5"] = [
        "FlaxLongT5ForConditionalGeneration",
        "FlaxLongT5Model",
        "FlaxLongT5PreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config, LongT5OnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_longt5 import (
            LONGT5_PRETRAINED_MODEL_ARCHIVE_LIST,
            LongT5EncoderModel,
            LongT5ForConditionalGeneration,
            LongT5Model,
            LongT5PreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_longt5 import (
            FlaxLongT5ForConditionalGeneration,
            FlaxLongT5Model,
            FlaxLongT5PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
277
1
"""simple docstring""" def _A ( SCREAMING_SNAKE_CASE : Any ): """simple docstring""" a__ : List[Any] =[0] * len(lowerCamelCase__ ) a__ : List[Any] =[] a__ : List[Any] =[] a__ : List[Any] =0 for values in graph.values(): for i in values: indegree[i] += 1 for i in range(len(lowerCamelCase__ ) ): if indegree[i] == 0: queue.append(lowerCamelCase__ ) while queue: a__ : List[Any] =queue.pop(0 ) cnt += 1 topo.append(lowerCamelCase__ ) for x in graph[vertex]: indegree[x] -= 1 if indegree[x] == 0: queue.append(lowerCamelCase__ ) if cnt != len(lowerCamelCase__ ): print("Cycle exists" ) else: print(lowerCamelCase__ ) # Adjacency List of Graph UpperCAmelCase : Tuple = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []} topological_sort(graph)
350
import tempfile import torch from diffusers import PNDMScheduler from .test_schedulers import SchedulerCommonTest class __lowerCAmelCase ( UpperCamelCase__): _lowercase : Any = (PNDMScheduler,) _lowercase : str = (("""num_inference_steps""", 50),) def _lowercase ( self , **lowerCAmelCase__ ) -> int: '''simple docstring''' a__ : Dict ={ "num_train_timesteps": 1_0_0_0, "beta_start": 0.00_01, "beta_end": 0.02, "beta_schedule": "linear", } config.update(**lowerCAmelCase__ ) return config def _lowercase ( self , lowerCAmelCase__=0 , **lowerCAmelCase__ ) -> List[Any]: '''simple docstring''' a__ : Optional[int] =dict(self.forward_default_kwargs ) a__ : Tuple =kwargs.pop("num_inference_steps" , lowerCAmelCase__ ) a__ : List[str] =self.dummy_sample a__ : List[str] =0.1 * sample a__ : str =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: a__ : int =self.get_scheduler_config(**lowerCAmelCase__ ) a__ : Union[str, Any] =scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(lowerCAmelCase__ ) # copy over dummy past residuals a__ : Any =dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCAmelCase__ ) a__ : List[Any] =scheduler_class.from_pretrained(lowerCAmelCase__ ) new_scheduler.set_timesteps(lowerCAmelCase__ ) # copy over dummy past residuals a__ : str =dummy_past_residuals[:] a__ : Any =scheduler.step_prk(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample a__ : Dict =new_scheduler.step_prk(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" a__ : Optional[int] =scheduler.step_plms(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample a__ : Union[str, Any] =new_scheduler.step_plms(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _lowercase ( self ) -> int: '''simple docstring''' pass def _lowercase ( self , lowerCAmelCase__=0 , **lowerCAmelCase__ ) -> Optional[Any]: '''simple docstring''' a__ : Optional[int] =dict(self.forward_default_kwargs ) a__ : List[str] =kwargs.pop("num_inference_steps" , lowerCAmelCase__ ) a__ : List[str] =self.dummy_sample a__ : int =0.1 * sample a__ : Tuple =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] for scheduler_class in self.scheduler_classes: a__ : Dict =self.get_scheduler_config() a__ : List[str] =scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(lowerCAmelCase__ ) # copy over dummy past residuals (must be after setting timesteps) a__ : Dict =dummy_past_residuals[:] with tempfile.TemporaryDirectory() as tmpdirname: scheduler.save_config(lowerCAmelCase__ ) a__ : Dict =scheduler_class.from_pretrained(lowerCAmelCase__ ) # copy over dummy past residuals new_scheduler.set_timesteps(lowerCAmelCase__ ) # copy over dummy past residual (must be after setting timesteps) a__ : Optional[int] =dummy_past_residuals[:] a__ : Optional[Any] =scheduler.step_prk(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample a__ : List[Any] =new_scheduler.step_prk(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" a__ : List[str] 
=scheduler.step_plms(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample a__ : Any =new_scheduler.step_plms(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical" def _lowercase ( self , **lowerCAmelCase__ ) -> int: '''simple docstring''' a__ : Union[str, Any] =self.scheduler_classes[0] a__ : Optional[Any] =self.get_scheduler_config(**lowerCAmelCase__ ) a__ : Any =scheduler_class(**lowerCAmelCase__ ) a__ : int =1_0 a__ : Union[str, Any] =self.dummy_model() a__ : Optional[int] =self.dummy_sample_deter scheduler.set_timesteps(lowerCAmelCase__ ) for i, t in enumerate(scheduler.prk_timesteps ): a__ : List[Any] =model(lowerCAmelCase__ , lowerCAmelCase__ ) a__ : Optional[Any] =scheduler.step_prk(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample for i, t in enumerate(scheduler.plms_timesteps ): a__ : int =model(lowerCAmelCase__ , lowerCAmelCase__ ) a__ : int =scheduler.step_plms(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample return sample def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' a__ : str =dict(self.forward_default_kwargs ) a__ : Tuple =kwargs.pop("num_inference_steps" , lowerCAmelCase__ ) for scheduler_class in self.scheduler_classes: a__ : Union[str, Any] =self.get_scheduler_config() a__ : List[str] =scheduler_class(**lowerCAmelCase__ ) a__ : List[Any] =self.dummy_sample a__ : Dict =0.1 * sample if num_inference_steps is not None and hasattr(lowerCAmelCase__ , "set_timesteps" ): scheduler.set_timesteps(lowerCAmelCase__ ) elif num_inference_steps is not None and not hasattr(lowerCAmelCase__ , "set_timesteps" ): a__ : int =num_inference_steps # copy over dummy past residuals (must be done after set_timesteps) a__ : Tuple =[residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05] a__ : str =dummy_past_residuals[:] a__ : List[Any] =scheduler.step_prk(lowerCAmelCase__ , 0 , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample a__ : int =scheduler.step_prk(lowerCAmelCase__ , 1 , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) a__ : List[str] =scheduler.step_plms(lowerCAmelCase__ , 0 , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample a__ : Dict =scheduler.step_plms(lowerCAmelCase__ , 1 , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample self.assertEqual(output_a.shape , sample.shape ) self.assertEqual(output_a.shape , output_a.shape ) def _lowercase ( self ) -> Tuple: '''simple docstring''' for timesteps in [1_0_0, 1_0_0_0]: self.check_over_configs(num_train_timesteps=lowerCAmelCase__ ) def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' for steps_offset in [0, 1]: self.check_over_configs(steps_offset=lowerCAmelCase__ ) a__ : Optional[Any] =self.scheduler_classes[0] a__ : Tuple =self.get_scheduler_config(steps_offset=1 ) a__ : Optional[Any] =scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(1_0 ) assert torch.equal( scheduler.timesteps , torch.LongTensor( [9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) , ) def _lowercase ( self ) -> Tuple: '''simple docstring''' for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ): self.check_over_configs(beta_start=lowerCAmelCase__ , beta_end=lowerCAmelCase__ ) def _lowercase ( self ) -> List[str]: 
'''simple docstring''' for schedule in ["linear", "squaredcos_cap_v2"]: self.check_over_configs(beta_schedule=lowerCAmelCase__ ) def _lowercase ( self ) -> List[str]: '''simple docstring''' for prediction_type in ["epsilon", "v_prediction"]: self.check_over_configs(prediction_type=lowerCAmelCase__ ) def _lowercase ( self ) -> Dict: '''simple docstring''' for t in [1, 5, 1_0]: self.check_over_forward(time_step=lowerCAmelCase__ ) def _lowercase ( self ) -> List[Any]: '''simple docstring''' for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ): self.check_over_forward(num_inference_steps=lowerCAmelCase__ ) def _lowercase ( self ) -> str: '''simple docstring''' a__ : Dict =2_7 for scheduler_class in self.scheduler_classes: a__ : Tuple =self.dummy_sample a__ : Dict =0.1 * sample a__ : Dict =self.get_scheduler_config() a__ : int =scheduler_class(**lowerCAmelCase__ ) scheduler.set_timesteps(lowerCAmelCase__ ) # before power of 3 fix, would error on first step, so we only need to do two for i, t in enumerate(scheduler.prk_timesteps[:2] ): a__ : Any =scheduler.step_prk(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ).prev_sample def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' with self.assertRaises(lowerCAmelCase__ ): a__ : List[Any] =self.scheduler_classes[0] a__ : Dict =self.get_scheduler_config() a__ : Tuple =scheduler_class(**lowerCAmelCase__ ) scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample def _lowercase ( self ) -> Optional[Any]: '''simple docstring''' a__ : List[Any] =self.full_loop() a__ : str =torch.sum(torch.abs(lowerCAmelCase__ ) ) a__ : Optional[Any] =torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 1_98.13_18 ) < 1E-2 assert abs(result_mean.item() - 0.25_80 ) < 1E-3 def _lowercase ( self ) -> str: '''simple docstring''' a__ : str =self.full_loop(prediction_type="v_prediction" ) a__ : int =torch.sum(torch.abs(lowerCAmelCase__ ) ) a__ : Optional[int] =torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 67.39_86 ) < 1E-2 assert abs(result_mean.item() - 0.08_78 ) < 1E-3 def _lowercase ( self ) -> Optional[int]: '''simple docstring''' a__ : Tuple =self.full_loop(set_alpha_to_one=lowerCAmelCase__ , beta_start=0.01 ) a__ : str =torch.sum(torch.abs(lowerCAmelCase__ ) ) a__ : Dict =torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 2_30.03_99 ) < 1E-2 assert abs(result_mean.item() - 0.29_95 ) < 1E-3 def _lowercase ( self ) -> Union[str, Any]: '''simple docstring''' a__ : Dict =self.full_loop(set_alpha_to_one=lowerCAmelCase__ , beta_start=0.01 ) a__ : Union[str, Any] =torch.sum(torch.abs(lowerCAmelCase__ ) ) a__ : Union[str, Any] =torch.mean(torch.abs(lowerCAmelCase__ ) ) assert abs(result_sum.item() - 1_86.94_82 ) < 1E-2 assert abs(result_mean.item() - 0.24_34 ) < 1E-3
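A minimal sketch of driving the scheduler by hand, mirroring the full-loop helper above (a zero residual stands in for a real model call):

import torch
from diffusers import PNDMScheduler

scheduler = PNDMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(10)
sample = torch.randn(1, 3, 8, 8)
# Runge-Kutta warm-up steps, then the linear multistep (PLMS) steps.
for t in scheduler.prk_timesteps:
    residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step_prk(residual, t, sample).prev_sample
for t in scheduler.plms_timesteps:
    residual = torch.zeros_like(sample)
    sample = scheduler.step_plms(residual, t, sample).prev_sample
print(sample.shape)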
148
0