code
stringlengths
82
54.1k
code_codestyle
int64
0
699
style_context
stringlengths
111
35.6k
style_context_codestyle
int64
0
699
label
int64
0
1
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available __UpperCamelCase : Optional[Any] = { """configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""], """configuration_data2vec_text""": [ """DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecTextConfig""", """Data2VecTextOnnxConfig""", ], """configuration_data2vec_vision""": [ """DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecVisionConfig""", """Data2VecVisionOnnxConfig""", ], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __UpperCamelCase : List[Any] = [ """DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecAudioForAudioFrameClassification""", """Data2VecAudioForCTC""", """Data2VecAudioForSequenceClassification""", """Data2VecAudioForXVector""", """Data2VecAudioModel""", """Data2VecAudioPreTrainedModel""", ] __UpperCamelCase : Dict = [ """DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecTextForCausalLM""", """Data2VecTextForMaskedLM""", """Data2VecTextForMultipleChoice""", """Data2VecTextForQuestionAnswering""", """Data2VecTextForSequenceClassification""", """Data2VecTextForTokenClassification""", """Data2VecTextModel""", """Data2VecTextPreTrainedModel""", ] __UpperCamelCase : int = [ """DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""", """Data2VecVisionForImageClassification""", """Data2VecVisionForMaskedImageModeling""", """Data2VecVisionForSemanticSegmentation""", """Data2VecVisionModel""", """Data2VecVisionPreTrainedModel""", ] if is_tf_available(): __UpperCamelCase : List[str] = [ """TFData2VecVisionForImageClassification""", """TFData2VecVisionForSemanticSegmentation""", """TFData2VecVisionModel""", """TFData2VecVisionPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig from .configuration_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecTextConfig, DataaVecTextOnnxConfig, ) from .configuration_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecVisionConfig, DataaVecVisionOnnxConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dataavec_audio import ( DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecAudioForAudioFrameClassification, DataaVecAudioForCTC, DataaVecAudioForSequenceClassification, DataaVecAudioForXVector, DataaVecAudioModel, DataaVecAudioPreTrainedModel, ) from .modeling_dataavec_text import ( DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecTextForCausalLM, DataaVecTextForMaskedLM, DataaVecTextForMultipleChoice, DataaVecTextForQuestionAnswering, DataaVecTextForSequenceClassification, DataaVecTextForTokenClassification, DataaVecTextModel, DataaVecTextPreTrainedModel, ) from .modeling_dataavec_vision import ( DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST, DataaVecVisionForImageClassification, DataaVecVisionForMaskedImageModeling, DataaVecVisionForSemanticSegmentation, DataaVecVisionModel, DataaVecVisionPreTrainedModel, ) if is_tf_available(): from .modeling_tf_dataavec_vision import ( TFDataaVecVisionForImageClassification, TFDataaVecVisionForSemanticSegmentation, TFDataaVecVisionModel, TFDataaVecVisionPreTrainedModel, ) else: import sys __UpperCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
80
def _a ( lowercase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = int(lowercase__ ) if n_element < 1: SCREAMING_SNAKE_CASE__ : Tuple = ValueError('a should be a positive number' ) raise my_error SCREAMING_SNAKE_CASE__ : Any = [1] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = (0, 0, 0) SCREAMING_SNAKE_CASE__ : Any = 1 while index < n_element: while hamming_list[i] * 2 <= hamming_list[-1]: i += 1 while hamming_list[j] * 3 <= hamming_list[-1]: j += 1 while hamming_list[k] * 5 <= hamming_list[-1]: k += 1 hamming_list.append( min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) ) index += 1 return hamming_list if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Any = input("Enter the last number (nth term) of the Hamming Number Series: ") print("Formula of Hamming Number Series => 2^i * 3^j * 5^k") SCREAMING_SNAKE_CASE__ : int = hamming(int(n)) print("-----------------------------------------------------") print(F"""The list with nth numbers is: {hamming_numbers}""") print("-----------------------------------------------------")
85
0
import inspect import unittest from transformers import BitConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import BitBackbone, BitForImageClassification, BitImageProcessor, BitModel from transformers.models.bit.modeling_bit import BIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image class a : """simple docstring""" def __init__( self : Optional[Any] , lowerCamelCase : Tuple , lowerCamelCase : str=3 , lowerCamelCase : Tuple=32 , lowerCamelCase : Tuple=3 , lowerCamelCase : Any=10 , lowerCamelCase : int=[8, 16, 32, 64] , lowerCamelCase : Optional[int]=[1, 1, 2, 1] , lowerCamelCase : int=True , lowerCamelCase : Optional[Any]=True , lowerCamelCase : Any="relu" , lowerCamelCase : Tuple=3 , lowerCamelCase : str=None , lowerCamelCase : Optional[Any]=["stage2", "stage3", "stage4"] , lowerCamelCase : Union[str, Any]=[2, 3, 4] , lowerCamelCase : str=1 , ) -> Union[str, Any]: __snake_case : Optional[int] = parent __snake_case : Optional[int] = batch_size __snake_case : Any = image_size __snake_case : Optional[int] = num_channels __snake_case : int = embeddings_size __snake_case : Tuple = hidden_sizes __snake_case : Optional[int] = depths __snake_case : Union[str, Any] = is_training __snake_case : Union[str, Any] = use_labels __snake_case : Optional[int] = hidden_act __snake_case : List[str] = num_labels __snake_case : Union[str, Any] = scope __snake_case : List[Any] = len(lowerCamelCase ) __snake_case : int = out_features __snake_case : Tuple = out_indices __snake_case : int = num_groups def __snake_case ( self : Optional[Any] ) -> Optional[int]: __snake_case : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) __snake_case : Tuple = None if self.use_labels: __snake_case : List[Any] = ids_tensor([self.batch_size] , self.num_labels ) __snake_case : str = self.get_config() return config, pixel_values, labels def __snake_case ( self : int ) -> List[str]: return BitConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , out_features=self.out_features , out_indices=self.out_indices , num_groups=self.num_groups , ) def __snake_case ( self : Optional[Any] , lowerCamelCase : Dict , lowerCamelCase : str , lowerCamelCase : List[Any] ) -> Any: __snake_case : int = BitModel(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __snake_case : Tuple = model(lowerCamelCase ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __snake_case ( self : List[str] , lowerCamelCase : Optional[int] , lowerCamelCase : Any , lowerCamelCase : Optional[int] ) -> int: __snake_case : List[str] = self.num_labels __snake_case : Union[str, Any] = BitForImageClassification(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __snake_case : Any = model(lowerCamelCase , labels=lowerCamelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __snake_case ( self : Dict , lowerCamelCase : str , lowerCamelCase : Optional[Any] , lowerCamelCase : int ) -> str: __snake_case : List[Any] = BitBackbone(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __snake_case : Union[str, Any] = model(lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] ) # verify channels self.parent.assertEqual(len(model.channels ) , len(config.out_features ) ) self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] ) # verify backbone works with out_features=None __snake_case : Dict = None __snake_case : str = BitBackbone(config=lowerCamelCase ) model.to(lowerCamelCase ) model.eval() __snake_case : int = model(lowerCamelCase ) # verify feature maps self.parent.assertEqual(len(result.feature_maps ) , 1 ) self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] ) # verify channels self.parent.assertEqual(len(model.channels ) , 1 ) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] ) def __snake_case ( self : Union[str, Any] ) -> List[Any]: __snake_case : Tuple = self.prepare_config_and_inputs() __snake_case , __snake_case , __snake_case : Optional[Any] = config_and_inputs __snake_case : int = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class a (_lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ): """simple docstring""" __UpperCAmelCase : str = (BitModel, BitForImageClassification, BitBackbone) if is_torch_available() else () __UpperCAmelCase : List[str] = ( {"feature-extraction": BitModel, "image-classification": BitForImageClassification} if is_torch_available() else {} ) __UpperCAmelCase : int = False __UpperCAmelCase : Tuple = False __UpperCAmelCase : Any = False __UpperCAmelCase : List[str] = False __UpperCAmelCase : Optional[int] = False def __snake_case ( self : int ) -> Optional[Any]: __snake_case : Any = BitModelTester(self ) __snake_case : Optional[Any] = ConfigTester(self , config_class=lowerCamelCase , has_text_modality=lowerCamelCase ) def __snake_case ( self : str ) -> int: self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __snake_case ( self : Optional[int] ) -> int: return @unittest.skip(reason="Bit does not output attentions" ) def __snake_case ( self : int ) -> Union[str, Any]: pass @unittest.skip(reason="Bit does not use inputs_embeds" ) def __snake_case ( self : int ) -> Dict: pass @unittest.skip(reason="Bit does not support input and output embeddings" ) def __snake_case ( self : str ) -> Optional[Any]: pass def __snake_case ( self : List[Any] ) -> List[Any]: __snake_case , __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Tuple = model_class(lowerCamelCase ) __snake_case : Optional[Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic __snake_case : Dict = [*signature.parameters.keys()] __snake_case : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , lowerCamelCase ) def __snake_case ( self : str ) -> List[str]: __snake_case : Any = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*lowerCamelCase ) def __snake_case ( self : str ) -> Any: __snake_case : List[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*lowerCamelCase ) def __snake_case ( self : List[Any] ) -> Tuple: __snake_case , __snake_case : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: __snake_case : Union[str, Any] = model_class(config=lowerCamelCase ) for name, module in model.named_modules(): if isinstance(lowerCamelCase , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=F'Parameter {name} of model {model_class} seems not properly initialized' , ) def __snake_case ( self : Dict ) -> List[Any]: def check_hidden_states_output(lowerCamelCase : Dict , lowerCamelCase : Dict , lowerCamelCase : Any ): __snake_case : Union[str, Any] = model_class(lowerCamelCase ) model.to(lowerCamelCase ) model.eval() with torch.no_grad(): __snake_case : List[str] = model(**self._prepare_for_class(lowerCamelCase , lowerCamelCase ) ) __snake_case : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states __snake_case : List[Any] = self.model_tester.num_stages self.assertEqual(len(lowerCamelCase ) , expected_num_stages + 1 ) # Bit's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , ) __snake_case , __snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() __snake_case : Tuple = ["preactivation", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: __snake_case : Optional[Any] = layer_type __snake_case : str = True check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] __snake_case : str = True check_hidden_states_output(lowerCamelCase , lowerCamelCase , lowerCamelCase ) @unittest.skip(reason="Bit does not use feedforward chunking" ) def __snake_case ( self : str ) -> Union[str, Any]: pass def __snake_case ( self : Dict ) -> str: __snake_case : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*lowerCamelCase ) @slow def __snake_case ( self : List[Any] ) -> List[Any]: for model_name in BIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: __snake_case : Tuple = BitModel.from_pretrained(lowerCamelCase ) self.assertIsNotNone(lowerCamelCase ) def lowerCAmelCase_ ( ): __snake_case : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class a (unittest.TestCase ): """simple docstring""" @cached_property def __snake_case ( self : Any ) -> Tuple: return ( BitImageProcessor.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __snake_case ( self : List[Any] ) -> List[str]: __snake_case : str = BitForImageClassification.from_pretrained(BIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(lowerCamelCase ) __snake_case : str = self.default_image_processor __snake_case : Dict = prepare_img() __snake_case : Tuple = image_processor(images=lowerCamelCase , return_tensors="pt" ).to(lowerCamelCase ) # forward pass with torch.no_grad(): __snake_case : Optional[int] = model(**lowerCamelCase ) # verify the logits __snake_case : Tuple = torch.Size((1, 1000) ) self.assertEqual(outputs.logits.shape , lowerCamelCase ) __snake_case : Dict = torch.tensor([[-0.65_26, -0.52_63, -1.43_98]] ).to(lowerCamelCase ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCamelCase , atol=1E-4 ) ) @require_torch class a (_lowerCAmelCase , unittest.TestCase ): """simple docstring""" __UpperCAmelCase : str = (BitBackbone,) if is_torch_available() else () __UpperCAmelCase : str = BitConfig __UpperCAmelCase : List[Any] = False def __snake_case ( self : Union[str, Any] ) -> Optional[int]: __snake_case : Optional[int] = BitModelTester(self )
81
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ : Union[str, Any] = { "configuration_nllb_moe": [ "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP", "NllbMoeConfig", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : str = [ "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST", "NllbMoeForConditionalGeneration", "NllbMoeModel", "NllbMoePreTrainedModel", "NllbMoeTop2Router", "NllbMoeSparseMLP", ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
0
"""simple docstring""" import unittest from transformers import MobileBertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class lowercase__ : '''simple docstring''' def __init__( self : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : List[str]=13 , _UpperCAmelCase : List[Any]=7 , _UpperCAmelCase : Optional[Any]=True , _UpperCAmelCase : Any=True , _UpperCAmelCase : List[Any]=True , _UpperCAmelCase : List[str]=True , _UpperCAmelCase : List[Any]=99 , _UpperCAmelCase : int=64 , _UpperCAmelCase : int=32 , _UpperCAmelCase : int=5 , _UpperCAmelCase : Any=4 , _UpperCAmelCase : Tuple=37 , _UpperCAmelCase : List[str]="gelu" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Tuple=0.1 , _UpperCAmelCase : Any=512 , _UpperCAmelCase : Optional[int]=16 , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Union[str, Any]=0.02 , _UpperCAmelCase : Any=3 , _UpperCAmelCase : Optional[int]=4 , _UpperCAmelCase : str=None , ) -> Optional[int]: '''simple docstring''' UpperCAmelCase_ = parent UpperCAmelCase_ = batch_size UpperCAmelCase_ = seq_length UpperCAmelCase_ = is_training UpperCAmelCase_ = use_input_mask UpperCAmelCase_ = use_token_type_ids UpperCAmelCase_ = use_labels UpperCAmelCase_ = vocab_size UpperCAmelCase_ = hidden_size UpperCAmelCase_ = embedding_size UpperCAmelCase_ = num_hidden_layers UpperCAmelCase_ = num_attention_heads UpperCAmelCase_ = intermediate_size UpperCAmelCase_ = hidden_act UpperCAmelCase_ = hidden_dropout_prob UpperCAmelCase_ = attention_probs_dropout_prob UpperCAmelCase_ = max_position_embeddings UpperCAmelCase_ = type_vocab_size UpperCAmelCase_ = type_sequence_label_size UpperCAmelCase_ = initializer_range UpperCAmelCase_ = num_labels UpperCAmelCase_ = num_choices UpperCAmelCase_ = scope def lowercase__ ( self : Dict ) -> str: '''simple docstring''' UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase_ = None if self.use_input_mask: UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase_ = None if self.use_token_type_ids: UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase_ = None UpperCAmelCase_ = None UpperCAmelCase_ = None if self.use_labels: UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase_ = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def lowercase__ ( self : List[Any] ) -> Any: '''simple docstring''' return MobileBertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , ) def lowercase__ ( self : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any ) -> Tuple: '''simple docstring''' UpperCAmelCase_ = MobileBertModel(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) UpperCAmelCase_ = model(_UpperCAmelCase , token_type_ids=_UpperCAmelCase ) UpperCAmelCase_ = model(_UpperCAmelCase ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def lowercase__ ( self : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any ) -> str: '''simple docstring''' UpperCAmelCase_ = MobileBertForMaskedLM(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Any ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ = MobileBertForNextSentencePrediction(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCAmelCase_ = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) ) def lowercase__ ( self : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[str] , _UpperCAmelCase : Any ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ = MobileBertForPreTraining(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCAmelCase_ = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , next_sentence_label=_UpperCAmelCase , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) ) def lowercase__ ( self : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[int] ) -> int: '''simple docstring''' UpperCAmelCase_ = MobileBertForQuestionAnswering(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCAmelCase_ = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , start_positions=_UpperCAmelCase , end_positions=_UpperCAmelCase , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def lowercase__ ( self : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Any , _UpperCAmelCase : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Optional[Any] ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = self.num_labels UpperCAmelCase_ = MobileBertForSequenceClassification(_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def lowercase__ ( self : Tuple , _UpperCAmelCase : int , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict , _UpperCAmelCase : Tuple ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = self.num_labels UpperCAmelCase_ = MobileBertForTokenClassification(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCAmelCase_ = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def lowercase__ ( self : str , _UpperCAmelCase : List[str] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = self.num_choices UpperCAmelCase_ = MobileBertForMultipleChoice(config=_UpperCAmelCase ) model.to(_UpperCAmelCase ) model.eval() UpperCAmelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase_ = model( _UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , labels=_UpperCAmelCase , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def lowercase__ ( self : Optional[Any] ) -> Tuple: '''simple docstring''' UpperCAmelCase_ = self.prepare_config_and_inputs() ( ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ( UpperCAmelCase_ ) , ) = config_and_inputs UpperCAmelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class lowercase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ): '''simple docstring''' UpperCamelCase = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) UpperCamelCase = ( { '''feature-extraction''': MobileBertModel, '''fill-mask''': MobileBertForMaskedLM, '''question-answering''': MobileBertForQuestionAnswering, '''text-classification''': MobileBertForSequenceClassification, '''token-classification''': MobileBertForTokenClassification, '''zero-shot''': MobileBertForSequenceClassification, } if is_torch_available() else {} ) UpperCamelCase = True def lowercase__ ( self : Tuple , _UpperCAmelCase : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int]=False ) -> Any: '''simple docstring''' UpperCAmelCase_ = super()._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase ) if return_labels: if model_class in get_values(_UpperCAmelCase ): UpperCAmelCase_ = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_UpperCAmelCase ) UpperCAmelCase_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_UpperCAmelCase ) return inputs_dict def lowercase__ ( self : List[str] ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = MobileBertModelTester(self ) UpperCAmelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 ) def lowercase__ ( self : List[Any] ) -> Dict: '''simple docstring''' self.config_tester.run_common_tests() def lowercase__ ( self : str ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*_UpperCAmelCase ) def lowercase__ ( self : int ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*_UpperCAmelCase ) def lowercase__ ( self : Any ) -> Union[str, Any]: '''simple docstring''' UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_UpperCAmelCase ) def lowercase__ ( self : List[Any] ) -> List[Any]: '''simple docstring''' UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_UpperCAmelCase ) def lowercase__ ( self : Tuple ) -> Any: '''simple docstring''' UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*_UpperCAmelCase ) def lowercase__ ( self : Dict ) -> str: '''simple docstring''' UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*_UpperCAmelCase ) def lowercase__ ( self : Optional[Any] ) -> List[str]: '''simple docstring''' UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_UpperCAmelCase ) def lowercase__ ( self : Optional[int] ) -> Optional[Any]: '''simple docstring''' UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*_UpperCAmelCase ) def a__ ( lowerCAmelCase__ ): return torch.tensor( lowerCAmelCase__ , dtype=torch.long , device=lowerCAmelCase__ , ) lowerCamelCase = 1e-3 @require_torch @require_sentencepiece @require_tokenizers class lowercase__ ( unittest.TestCase ): '''simple docstring''' @slow def lowercase__ ( self : Dict ) -> Dict: '''simple docstring''' UpperCAmelCase_ = MobileBertModel.from_pretrained("google/mobilebert-uncased" ).to(_UpperCAmelCase ) UpperCAmelCase_ = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]] ) with torch.no_grad(): UpperCAmelCase_ = model(_UpperCAmelCase )[0] UpperCAmelCase_ = torch.Size((1, 9, 512) ) self.assertEqual(output.shape , _UpperCAmelCase ) UpperCAmelCase_ = torch.tensor( [ [ [-2.4736526e07, 8.2691656e04, 1.6521838e05], [-5.7541704e-01, 3.9056022e00, 4.4011507e00], [2.6047359e00, 1.5677652e00, -1.7324188e-01], ] ] , device=_UpperCAmelCase , ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE UpperCAmelCase_ = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE ) UpperCAmelCase_ = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE ) self.assertTrue(lower_bound and upper_bound )
82
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) SCREAMING_SNAKE_CASE__ : List[str] = { "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[int] = [ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys SCREAMING_SNAKE_CASE__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
0
"""simple docstring""" from torch import nn def snake_case_ ( A_ : int ): '''simple docstring''' if act_fn in ["swish", "silu"]: return nn.SiLU() elif act_fn == "mish": return nn.Mish() elif act_fn == "gelu": return nn.GELU() else: raise ValueError(F'''Unsupported activation function: {act_fn}''' )
83
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs SCREAMING_SNAKE_CASE__ : int = imread(r"digital_image_processing/image_data/lena_small.jpg") SCREAMING_SNAKE_CASE__ : List[Any] = cvtColor(img, COLOR_BGR2GRAY) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = cn.convert_to_negative(lowercase__ ) # assert negative_img array for at least one True assert negative_img.any() def _a ( ): '''simple docstring''' with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img: # Work around assertion for response assert str(cc.change_contrast(lowercase__ , 1_10 ) ).startswith( '<PIL.Image.Image image mode=RGB size=100x100 at' ) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = imread('digital_image_processing/image_data/lena_small.jpg' , 0 ) # assert ambiguous array for all == True assert canny_img.all() SCREAMING_SNAKE_CASE__ : List[str] = canny.canny(lowercase__ ) # assert canny array for at least one True assert canny_array.any() def _a ( ): '''simple docstring''' assert gg.gaussian_filter(lowercase__ , 5 , sigma=0.9 ).all() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) SCREAMING_SNAKE_CASE__ : Tuple = conv.img_convolve(lowercase__ , lowercase__ ).astype(lowercase__ ) assert res.any() def _a ( ): '''simple docstring''' assert med.median_filter(lowercase__ , 3 ).any() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = sob.sobel_filter(lowercase__ ) assert grad.any() and theta.any() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = sp.make_sepia(lowercase__ , 20 ) assert sepia.all() def _a ( lowercase__ : str = "digital_image_processing/image_data/lena_small.jpg" ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = bs.Burkes(imread(lowercase__ , 1 ) , 1_20 ) burkes.process() assert burkes.output_img.any() def _a ( lowercase__ : str = "digital_image_processing/image_data/lena_small.jpg" , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = rs.NearestNeighbour(imread(lowercase__ , 1 ) , 4_00 , 2_00 ) nn.process() assert nn.output.any() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = 'digital_image_processing/image_data/lena.jpg' # Reading the image and converting it to grayscale. SCREAMING_SNAKE_CASE__ : Dict = imread(lowercase__ , 0 ) # Test for get_neighbors_pixel function() return not None SCREAMING_SNAKE_CASE__ : str = 0 SCREAMING_SNAKE_CASE__ : Dict = 0 SCREAMING_SNAKE_CASE__ : Any = image[x_coordinate][y_coordinate] SCREAMING_SNAKE_CASE__ : List[Any] = lbp.get_neighbors_pixel( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image SCREAMING_SNAKE_CASE__ : Optional[Any] = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): SCREAMING_SNAKE_CASE__ : str = lbp.local_binary_value(lowercase__ , lowercase__ , lowercase__ ) assert lbp_image.any()
85
0
import os def UpperCAmelCase_ ( ): lowercase = os.path.join(os.path.dirname(__SCREAMING_SNAKE_CASE ) , 'num.txt' ) with open(__SCREAMING_SNAKE_CASE ) as file_hand: return str(sum(int(__SCREAMING_SNAKE_CASE ) for line in file_hand ) )[:10] if __name__ == "__main__": print(solution())
84
import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu SCREAMING_SNAKE_CASE__ : Any = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json" with io.open(filename, "r", encoding="utf-8") as f: SCREAMING_SNAKE_CASE__ : Tuple = json.load(f) @require_torch class snake_case ( unittest.TestCase ): def __lowercase( self : List[str] , a_ : Any )-> str: """simple docstring""" return FSMTTokenizer.from_pretrained(a_ ) def __lowercase( self : int , a_ : Union[str, Any] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = FSMTForConditionalGeneration.from_pretrained(a_ ).to(a_ ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ['en-ru', 26.0], ['ru-en', 22.0], ['en-de', 22.0], ['de-en', 29.0], ] ) @slow def __lowercase( self : int , a_ : Optional[int] , a_ : str )-> List[str]: """simple docstring""" # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality SCREAMING_SNAKE_CASE__ : Any = F'''facebook/wmt19-{pair}''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_tokenizer(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_model(a_ ) SCREAMING_SNAKE_CASE__ : int = bleu_data[pair]['src'] SCREAMING_SNAKE_CASE__ : Optional[int] = bleu_data[pair]['tgt'] SCREAMING_SNAKE_CASE__ : Any = tokenizer(a_ , return_tensors='pt' , truncation=a_ , padding='longest' ).to(a_ ) SCREAMING_SNAKE_CASE__ : int = model.generate( input_ids=batch.input_ids , num_beams=8 , ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.batch_decode( a_ , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = calculate_bleu(a_ , a_ ) print(a_ ) self.assertGreaterEqual(scores['bleu'] , a_ )
85
0
from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig, OnnxSeqaSeqConfigWithPast from ...utils import logging if TYPE_CHECKING: from ...feature_extraction_utils import FeatureExtractionMixin from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType __a :Optional[Any] = logging.get_logger(__name__) __a :Dict = { 'openai/whisper-base': 'https://huggingface.co/openai/whisper-base/resolve/main/config.json', } # fmt: off __a :Dict = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 357, 366, 438, 532, 685, 705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377, 1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211, 4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 1_0563, 1_0786, 1_1420, 1_1709, 1_1907, 1_3163, 1_3697, 1_3700, 1_4808, 1_5306, 1_6410, 1_6791, 1_7992, 1_9203, 1_9510, 2_0724, 2_2305, 2_2935, 2_7007, 3_0109, 3_0420, 3_3409, 3_4949, 4_0283, 4_0493, 4_0549, 4_7282, 4_9146, 5_0257, 5_0359, 5_0360, 5_0361 ] __a :Optional[int] = [ 1, 2, 7, 8, 9, 10, 14, 25, 26, 27, 28, 29, 31, 58, 59, 60, 61, 62, 63, 90, 91, 92, 93, 359, 503, 522, 542, 873, 893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627, 3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647, 7273, 9061, 9383, 1_0428, 1_0929, 1_1938, 1_2033, 1_2331, 1_2562, 1_3793, 1_4157, 1_4635, 1_5265, 1_5618, 1_6553, 1_6604, 1_8362, 1_8956, 2_0075, 2_1675, 2_2520, 2_6130, 2_6161, 2_6435, 2_8279, 2_9464, 3_1650, 3_2302, 3_2470, 3_6865, 4_2863, 4_7425, 4_9870, 5_0254, 5_0258, 5_0360, 5_0361, 5_0362 ] class _a ( snake_case_ ): """simple docstring""" _lowerCamelCase : Tuple = 'whisper' _lowerCamelCase : Optional[Any] = ['past_key_values'] _lowerCamelCase : List[Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'} def __init__( self : Union[str, Any] , UpperCAmelCase : Tuple=51865 , UpperCAmelCase : Dict=80 , UpperCAmelCase : Any=6 , UpperCAmelCase : Optional[Any]=4 , UpperCAmelCase : Optional[int]=6 , UpperCAmelCase : Any=4 , UpperCAmelCase : str=1536 , UpperCAmelCase : Optional[int]=1536 , UpperCAmelCase : Optional[Any]=0.0 , UpperCAmelCase : Any=0.0 , UpperCAmelCase : Tuple=50257 , UpperCAmelCase : List[str]=True , UpperCAmelCase : str=True , UpperCAmelCase : str="gelu" , UpperCAmelCase : Optional[Any]=256 , UpperCAmelCase : Union[str, Any]=0.0 , UpperCAmelCase : Any=0.0 , UpperCAmelCase : str=0.0 , UpperCAmelCase : List[str]=0.02 , UpperCAmelCase : List[str]=False , UpperCAmelCase : str=1500 , UpperCAmelCase : Any=448 , UpperCAmelCase : str=50256 , UpperCAmelCase : List[Any]=50256 , UpperCAmelCase : Any=50256 , UpperCAmelCase : int=None , UpperCAmelCase : Optional[Any]=[220, 50256] , UpperCAmelCase : Optional[Any]=False , UpperCAmelCase : int=256 , UpperCAmelCase : List[str]=False , UpperCAmelCase : Dict=0.05 , UpperCAmelCase : List[str]=10 , UpperCAmelCase : int=2 , UpperCAmelCase : Tuple=0.0 , UpperCAmelCase : int=10 , UpperCAmelCase : Optional[int]=0 , UpperCAmelCase : Dict=7 , **UpperCAmelCase : List[Any] , ): A_ = vocab_size A_ = num_mel_bins A_ = d_model A_ = encoder_layers A_ = encoder_attention_heads A_ = decoder_layers A_ = decoder_attention_heads A_ = decoder_ffn_dim A_ = encoder_ffn_dim A_ = dropout A_ = attention_dropout A_ = activation_dropout A_ = activation_function A_ = init_std A_ = encoder_layerdrop A_ = decoder_layerdrop A_ = use_cache A_ = encoder_layers A_ = scale_embedding # scale factor will be sqrt(d_model) if True A_ = max_source_positions A_ = max_target_positions # Audio Classification-specific parameters. Feel free to ignore for other classes. A_ = classifier_proj_size A_ = use_weighted_layer_sum # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 A_ = apply_spec_augment A_ = mask_time_prob A_ = mask_time_length A_ = mask_time_min_masks A_ = mask_feature_prob A_ = mask_feature_length A_ = mask_feature_min_masks A_ = median_filter_width super().__init__( pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , is_encoder_decoder=UpperCAmelCase , decoder_start_token_id=UpperCAmelCase , suppress_tokens=UpperCAmelCase , begin_suppress_tokens=UpperCAmelCase , **UpperCAmelCase , ) class _a ( snake_case_ ): """simple docstring""" @property def __A ( self : Optional[Any] ): A_ = OrderedDict( [ ("input_features", {0: "batch", 1: "feature_size", 2: "encoder_sequence"}), ] ) if self.use_past: A_ = {0: "batch"} else: A_ = {0: "batch", 1: "decoder_sequence"} if self.use_past: self.fill_with_past_key_values_(UpperCAmelCase , direction="inputs" ) return common_inputs def __A ( self : List[Any] , UpperCAmelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , UpperCAmelCase : int = -1 , UpperCAmelCase : int = -1 , UpperCAmelCase : bool = False , UpperCAmelCase : Optional["TensorType"] = None , UpperCAmelCase : int = 22050 , UpperCAmelCase : float = 5.0 , UpperCAmelCase : int = 220 , ): A_ = OrderedDict() A_ = OnnxConfig.generate_dummy_inputs( self , preprocessor=preprocessor.feature_extractor , batch_size=UpperCAmelCase , framework=UpperCAmelCase , sampling_rate=UpperCAmelCase , time_duration=UpperCAmelCase , frequency=UpperCAmelCase , ) A_ = encoder_inputs["input_features"].shape[2] A_ = encoder_sequence_length // 2 if self.use_past else seq_length A_ = super().generate_dummy_inputs( preprocessor.tokenizer , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ) A_ = encoder_inputs.pop("input_features" ) A_ = decoder_inputs.pop("decoder_input_ids" ) if "past_key_values" in decoder_inputs: A_ = decoder_inputs.pop("past_key_values" ) return dummy_inputs @property def __A ( self : Dict ): return 1E-3
86
import os import pytest from attr import dataclass SCREAMING_SNAKE_CASE__ : int = "us-east-1" # defaults region @dataclass class snake_case : lowercase_ = 42 lowercase_ = 'arn:aws:iam::558105141721:role/sagemaker_execution_role' lowercase_ = { 'task_name': 'mnli', 'per_device_train_batch_size': 16, 'per_device_eval_batch_size': 16, 'do_train': True, 'do_eval': True, 'do_predict': True, 'output_dir': '/opt/ml/model', 'overwrite_output_dir': True, 'max_steps': 500, 'save_steps': 5_500, } lowercase_ = {**hyperparameters, 'max_steps': 1_000} @property def __lowercase( self : List[str] )-> str: """simple docstring""" if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def __lowercase( self : Union[str, Any] )-> str: """simple docstring""" return F'''{self.framework}-transfromers-test''' @property def __lowercase( self : int )-> str: """simple docstring""" return F'''./tests/sagemaker/scripts/{self.framework}''' @property def __lowercase( self : Tuple )-> str: """simple docstring""" if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope='class' ) def _a ( lowercase__ : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
85
0
import unittest from transformers import JukeboxTokenizer from transformers.testing_utils import require_torch class UpperCamelCase_ ( unittest.TestCase ): '''simple docstring''' UpperCAmelCase__ = JukeboxTokenizer UpperCAmelCase__ = { '''artist''': '''Zac Brown Band''', '''genres''': '''Country''', '''lyrics''': '''I met a traveller from an antique land, Who said "Two vast and trunkless legs of stone Stand in the desert. . . . Near them, on the sand, Half sunk a shattered visage lies, whose frown, And wrinkled lip, and sneer of cold command, Tell that its sculptor well those passions read Which yet survive, stamped on these lifeless things, The hand that mocked them, and the heart that fed; And on the pedestal, these words appear: My name is Ozymandias, King of Kings; Look on my Works, ye Mighty, and despair! Nothing beside remains. Round the decay Of that colossal Wreck, boundless and bare The lone and level sands stretch far away ''', } @require_torch def SCREAMING_SNAKE_CASE ( self : Dict) ->int: '''simple docstring''' import torch A__ = JukeboxTokenizer.from_pretrained('''openai/jukebox-1b-lyrics''') A__ = tokenizer(**self.metas)['''input_ids'''] # fmt: off A__ = [ torch.tensor([[ 0, 0, 0, 7_169, 507, 9, 76, 39, 31, 46, 76, 27, 76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32, 44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43, 47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35, 30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76, 27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45, 45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46, 41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31, 76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63, 76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39, 64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8, 27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45, 34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45, 27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34, 41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49, 44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64, 76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41, 32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27, 40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46, 45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49, 31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27, 45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78, 76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29, 34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48, 31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41, 40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31, 38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39, 41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76, 27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44, 46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45, 46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49, 41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65, 78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76, 40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33, 76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76, 76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76, 41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64, 76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76, 27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67, 78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46, 34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76, 44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47, 40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76, 46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27, 38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47, 40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28, 27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30, 76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45, 76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44, 76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76, 76, 76]]), torch.tensor([[0, 0, 0, 1_069, 11]]), torch.tensor([[0, 0, 0, 1_069, 11]]), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0])) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1])) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2])) @require_torch def SCREAMING_SNAKE_CASE ( self : List[str]) ->Optional[int]: '''simple docstring''' import torch A__ = JukeboxTokenizer.from_pretrained('''openai/jukebox-5b-lyrics''') A__ = tokenizer(**self.metas)['''input_ids'''] # fmt: off A__ = [ torch.tensor([[ 0, 0, 0, 1_069, 11, -1, -1, -1, -1, 9, 77, 39, 31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38, 31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27, 40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41, 77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48, 27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40, 37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41, 32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40, 77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63, 77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77, 46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31, 77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37, 77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30, 77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45, 64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49, 40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77, 38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31, 31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29, 41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27, 46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46, 41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45, 31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44, 31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47, 44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42, 31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77, 38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35, 40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34, 27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34, 31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77, 34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32, 31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42, 31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31, 45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42, 31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77, 77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77, 15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77, 11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33, 45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12, 41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41, 44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34, 46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42, 27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77, 77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45, 35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63, 77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30, 31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38, 41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64, 77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27, 40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77, 77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31, 77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45, 27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34, 77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77, 77]]), torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]), torch.tensor([[0, 0, 0, 1_069, 11, -1, -1, -1, -1]]), ] # fmt: on self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0])) self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1])) self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2]))
87
import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = FunnelTokenizer lowercase_ = FunnelTokenizerFast lowercase_ = True lowercase_ = True def __lowercase( self : Union[str, Any] )-> Tuple: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ : str = [ '<unk>', '<cls>', '<sep>', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def __lowercase( self : Any , **a_ : Any )-> List[str]: """simple docstring""" return FunnelTokenizer.from_pretrained(self.tmpdirname , **a_ ) def __lowercase( self : Tuple , **a_ : List[Any] )-> List[Any]: """simple docstring""" return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **a_ ) def __lowercase( self : Optional[Any] , a_ : List[str] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'UNwant\u00E9d,running' SCREAMING_SNAKE_CASE__ : int = 'unwanted, running' return input_text, output_text def __lowercase( self : Optional[Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer_class(self.vocab_file ) SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(a_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [7, 4, 5, 10, 8, 9] ) def __lowercase( self : List[Any] )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_tokenizers(do_lower_case=a_ ) for tokenizer in tokenizers: SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer('UNwant\u00E9d,running' ) SCREAMING_SNAKE_CASE__ : List[Any] = len(inputs['input_ids'] ) - 1 self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer('UNwant\u00E9d,running' , 'UNwant\u00E9d,running' ) self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len + [1] * sentence_len )
85
0
"""simple docstring""" import pytest from datasets import Dataset, DatasetDict, Features, NamedSplit, Value from datasets.io.text import TextDatasetReader from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def _snake_case ( __snake_case : List[str] , __snake_case : List[str] ): """simple docstring""" assert isinstance(__snake_case , __snake_case ) assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def _snake_case ( __snake_case : int , __snake_case : List[str] , __snake_case : Tuple ): """simple docstring""" _lowerCamelCase : List[str] = tmp_path / """cache""" _lowerCamelCase : int = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _lowerCamelCase : str = TextDatasetReader(__snake_case , cache_dir=__snake_case , keep_in_memory=__snake_case ).read() _check_text_dataset(__snake_case , __snake_case ) @pytest.mark.parametrize( """features""" , [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ] , ) def _snake_case ( __snake_case : Optional[int] , __snake_case : Union[str, Any] , __snake_case : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Tuple = tmp_path / """cache""" _lowerCamelCase : Dict = {"""text""": """string"""} _lowerCamelCase : Optional[int] = features.copy() if features else default_expected_features _lowerCamelCase : Dict = ( Features({feature: Value(__snake_case ) for feature, dtype in features.items()} ) if features is not None else None ) _lowerCamelCase : Optional[int] = TextDatasetReader(__snake_case , features=__snake_case , cache_dir=__snake_case ).read() _check_text_dataset(__snake_case , __snake_case ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def _snake_case ( __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Any = tmp_path / """cache""" _lowerCamelCase : Dict = {"""text""": """string"""} _lowerCamelCase : int = TextDatasetReader(__snake_case , cache_dir=__snake_case , split=__snake_case ).read() _check_text_dataset(__snake_case , __snake_case ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def _snake_case ( __snake_case : List[Any] , __snake_case : Any , __snake_case : List[str] ): """simple docstring""" if issubclass(__snake_case , __snake_case ): _lowerCamelCase : int = text_path elif issubclass(__snake_case , __snake_case ): _lowerCamelCase : List[str] = [text_path] _lowerCamelCase : int = tmp_path / """cache""" _lowerCamelCase : List[Any] = {"""text""": """string"""} _lowerCamelCase : Any = TextDatasetReader(__snake_case , cache_dir=__snake_case ).read() _check_text_dataset(__snake_case , __snake_case ) def _snake_case ( __snake_case : Dict , __snake_case : Any , __snake_case : List[Any]=("train",) ): """simple docstring""" assert isinstance(__snake_case , __snake_case ) for split in splits: _lowerCamelCase : List[str] = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 1 assert dataset.column_names == ["text"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def _snake_case ( __snake_case : Optional[Any] , __snake_case : List[Any] , __snake_case : Tuple ): """simple docstring""" _lowerCamelCase : List[Any] = tmp_path / """cache""" _lowerCamelCase : Dict = {"""text""": """string"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _lowerCamelCase : Union[str, Any] = TextDatasetReader({"""train""": text_path} , cache_dir=__snake_case , keep_in_memory=__snake_case ).read() _check_text_datasetdict(__snake_case , __snake_case ) @pytest.mark.parametrize( """features""" , [ None, {"""text""": """string"""}, {"""text""": """int32"""}, {"""text""": """float32"""}, ] , ) def _snake_case ( __snake_case : str , __snake_case : Optional[Any] , __snake_case : Union[str, Any] ): """simple docstring""" _lowerCamelCase : Optional[int] = tmp_path / """cache""" # CSV file loses col_1 string dtype information: default now is "int64" instead of "string" _lowerCamelCase : int = {"""text""": """string"""} _lowerCamelCase : Optional[Any] = features.copy() if features else default_expected_features _lowerCamelCase : List[Any] = ( Features({feature: Value(__snake_case ) for feature, dtype in features.items()} ) if features is not None else None ) _lowerCamelCase : Dict = TextDatasetReader({"""train""": text_path} , features=__snake_case , cache_dir=__snake_case ).read() _check_text_datasetdict(__snake_case , __snake_case ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def _snake_case ( __snake_case : Optional[int] , __snake_case : Optional[Any] , __snake_case : Any ): """simple docstring""" if split: _lowerCamelCase : Any = {split: text_path} else: _lowerCamelCase : List[str] = """train""" _lowerCamelCase : List[str] = {"""train""": text_path, """test""": text_path} _lowerCamelCase : str = tmp_path / """cache""" _lowerCamelCase : Optional[Any] = {"""text""": """string"""} _lowerCamelCase : Dict = TextDatasetReader(__snake_case , cache_dir=__snake_case ).read() _check_text_datasetdict(__snake_case , __snake_case , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() )
88
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Any = { "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json", # See all LeViT models at https://huggingface.co/models?filter=levit } class snake_case ( UpperCamelCase_ ): lowercase_ = 'levit' def __init__( self : str , a_ : Optional[Any]=224 , a_ : List[str]=3 , a_ : Any=3 , a_ : Any=2 , a_ : Tuple=1 , a_ : int=16 , a_ : Optional[int]=[128, 256, 384] , a_ : Dict=[4, 8, 12] , a_ : List[str]=[4, 4, 4] , a_ : Any=[16, 16, 16] , a_ : Dict=0 , a_ : Tuple=[2, 2, 2] , a_ : Union[str, Any]=[2, 2, 2] , a_ : Optional[Any]=0.02 , **a_ : str , )-> Any: """simple docstring""" super().__init__(**a_ ) SCREAMING_SNAKE_CASE__ : Any = image_size SCREAMING_SNAKE_CASE__ : List[Any] = num_channels SCREAMING_SNAKE_CASE__ : Any = kernel_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = stride SCREAMING_SNAKE_CASE__ : Any = padding SCREAMING_SNAKE_CASE__ : Any = hidden_sizes SCREAMING_SNAKE_CASE__ : List[Any] = num_attention_heads SCREAMING_SNAKE_CASE__ : Optional[Any] = depths SCREAMING_SNAKE_CASE__ : List[str] = key_dim SCREAMING_SNAKE_CASE__ : int = drop_path_rate SCREAMING_SNAKE_CASE__ : List[str] = patch_size SCREAMING_SNAKE_CASE__ : List[str] = attention_ratio SCREAMING_SNAKE_CASE__ : Tuple = mlp_ratio SCREAMING_SNAKE_CASE__ : str = initializer_range SCREAMING_SNAKE_CASE__ : List[Any] = [ ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class snake_case ( UpperCamelCase_ ): lowercase_ = version.parse('1.11' ) @property def __lowercase( self : str )-> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def __lowercase( self : Any )-> float: """simple docstring""" return 1e-4
85
0
def UpperCamelCase_( lowerCamelCase_ ) -> str: if number > 0: raise ValueError('input must be a negative integer' ) _lowercase : Any = len(bin(lowerCamelCase_ )[3:] ) _lowercase : List[Any] = bin(abs(lowerCamelCase_ ) - (1 << binary_number_length) )[3:] _lowercase : Optional[int] = ( ( '1' + '0' * (binary_number_length - len(lowerCamelCase_ )) + twos_complement_number ) if number < 0 else '0' ) return "0b" + twos_complement_number if __name__ == "__main__": import doctest doctest.testmod()
89
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class snake_case ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase_ = StableDiffusionInstructPixaPixPipeline lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'} lowercase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS def __lowercase( self : str )-> int: """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) SCREAMING_SNAKE_CASE__ : List[str] = PNDMScheduler(skip_prk_steps=a_ ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) SCREAMING_SNAKE_CASE__ : int = CLIPTextModel(a_ ) SCREAMING_SNAKE_CASE__ : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) SCREAMING_SNAKE_CASE__ : List[str] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def __lowercase( self : List[Any] , a_ : Tuple , a_ : Optional[Any]=0 )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ ) SCREAMING_SNAKE_CASE__ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE__ : List[Any] = Image.fromarray(np.uinta(a_ ) ).convert('RGB' ) if str(a_ ).startswith('mps' ): SCREAMING_SNAKE_CASE__ : str = torch.manual_seed(a_ ) else: SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Generator(device=a_ ).manual_seed(a_ ) SCREAMING_SNAKE_CASE__ : Dict = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'image_guidance_scale': 1, 'output_type': 'numpy', } return inputs def __lowercase( self : str )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Tuple = self.get_dummy_inputs(a_ ) SCREAMING_SNAKE_CASE__ : int = sd_pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE__ : Dict = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase( self : Optional[Any] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE__ : Dict = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_inputs(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = 'french fries' SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe(**a_ , negative_prompt=a_ ) SCREAMING_SNAKE_CASE__ : Dict = output.images SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase( self : List[Any] )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : int = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [inputs['prompt']] * 2 SCREAMING_SNAKE_CASE__ : List[str] = np.array(inputs['image'] ).astype(np.floataa ) / 255.0 SCREAMING_SNAKE_CASE__ : Tuple = torch.from_numpy(a_ ).unsqueeze(0 ).to(a_ ) SCREAMING_SNAKE_CASE__ : Dict = image / 2 + 0.5 SCREAMING_SNAKE_CASE__ : Tuple = image.permute(0 , 3 , 1 , 2 ) SCREAMING_SNAKE_CASE__ : int = image.repeat(2 , 1 , 1 , 1 ) SCREAMING_SNAKE_CASE__ : Optional[int] = sd_pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : Any = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) SCREAMING_SNAKE_CASE__ : int = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase( self : List[Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE__ : str = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Optional[Any] = EulerAncestralDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' ) SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : Dict = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_inputs(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = sd_pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : Any = [round(a_ , 4 ) for x in image_slice.flatten().tolist()] print(','.join([str(a_ ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE__ : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase( self : Union[str, Any] )-> Any: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def __lowercase( self : List[Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : int = VaeImageProcessor(do_resize=a_ , do_normalize=a_ ) SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Any = pipe(**self.get_dummy_inputs_by_type(a_ , input_image_type='pt' ) )[0] SCREAMING_SNAKE_CASE__ : Optional[int] = components['vae'] SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs_by_type(a_ , input_image_type='pt' ) for image_param in self.image_latents_params: if image_param in inputs.keys(): SCREAMING_SNAKE_CASE__ : Union[str, Any] = vae.encode(inputs[image_param] ).latent_dist.mode() SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(**a_ )[0] SCREAMING_SNAKE_CASE__ : List[Any] = np.abs(out - out_latents_inputs ).max() self.assertLess(a_ , 1e-4 , 'passing latents as image input generate different result from passing image' ) @slow @require_torch_gpu class snake_case ( unittest.TestCase ): def __lowercase( self : Tuple )-> Dict: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase( self : List[Any] , a_ : Dict=0 )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = torch.manual_seed(a_ ) SCREAMING_SNAKE_CASE__ : List[str] = load_image( 'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' ) SCREAMING_SNAKE_CASE__ : Tuple = { 'prompt': 'turn him into a cyborg', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'image_guidance_scale': 1.0, 'output_type': 'numpy', } return inputs def __lowercase( self : int )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : str = self.get_inputs() SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __lowercase( self : Dict )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ ) SCREAMING_SNAKE_CASE__ : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs() SCREAMING_SNAKE_CASE__ : Dict = pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : Optional[int] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ : List[Any] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __lowercase( self : Optional[int] )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ ) SCREAMING_SNAKE_CASE__ : Dict = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : str = self.get_inputs() SCREAMING_SNAKE_CASE__ : Tuple = pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __lowercase( self : int )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = 0 def callback_fn(a_ : int , a_ : int , a_ : torch.FloatTensor ) -> None: SCREAMING_SNAKE_CASE__ : Tuple = True nonlocal number_of_steps number_of_steps += 1 if step == 1: SCREAMING_SNAKE_CASE__ : Union[str, Any] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) SCREAMING_SNAKE_CASE__ : List[Any] = latents[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: SCREAMING_SNAKE_CASE__ : Optional[int] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) SCREAMING_SNAKE_CASE__ : Tuple = latents[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : Dict = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 SCREAMING_SNAKE_CASE__ : List[str] = False SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs() pipe(**a_ , callback=a_ , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def __lowercase( self : int )-> Any: """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs() SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(**a_ ) SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def __lowercase( self : Tuple )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 SCREAMING_SNAKE_CASE__ : Dict = inputs['image'].resize((504, 504) ) SCREAMING_SNAKE_CASE__ : List[Any] = 'timbrooks/instruct-pix2pix' SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained( a_ , safety_checker=a_ , ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : Any = pipe(**a_ ) SCREAMING_SNAKE_CASE__ : List[str] = output.images[0] SCREAMING_SNAKE_CASE__ : Any = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) SCREAMING_SNAKE_CASE__ : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
85
0
'''simple docstring''' import logging import os from typing import Dict, List, Optional, Union import torch import torch.nn as nn from accelerate.utils.imports import ( is_abit_bnb_available, is_abit_bnb_available, is_bnb_available, ) from ..big_modeling import dispatch_model, init_empty_weights from .dataclasses import BnbQuantizationConfig from .modeling import ( find_tied_parameters, get_balanced_memory, infer_auto_device_map, load_checkpoint_in_model, offload_weight, set_module_tensor_to_device, ) if is_bnb_available(): import bitsandbytes as bnb from copy import deepcopy __UpperCAmelCase = logging.getLogger(__name__) def _snake_case ( A , A , A = None , A = None , A = None , A = None , A = None , A = False , ) -> Union[str, Any]: lowerCAmelCase__ = bnb_quantization_config.load_in_abit lowerCAmelCase__ = bnb_quantization_config.load_in_abit if load_in_abit and not is_abit_bnb_available(): raise ImportError( '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,''' ''' make sure you have the latest version of `bitsandbytes` installed.''' ) if load_in_abit and not is_abit_bnb_available(): raise ValueError( '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,''' '''make sure you have the latest version of `bitsandbytes` installed.''' ) lowerCAmelCase__ = [] # custom device map if isinstance(A , A ) and len(device_map.keys() ) > 1: lowerCAmelCase__ = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']] # We keep some modules such as the lm_head in their original dtype for numerical stability reasons if bnb_quantization_config.skip_modules is None: lowerCAmelCase__ = get_keys_to_not_convert(A ) # add cpu modules to skip modules only for 4-bit modules if load_in_abit: bnb_quantization_config.skip_modules.extend(A ) lowerCAmelCase__ = bnb_quantization_config.skip_modules # We add the modules we want to keep in full precision if bnb_quantization_config.keep_in_fpaa_modules is None: lowerCAmelCase__ = [] lowerCAmelCase__ = bnb_quantization_config.keep_in_fpaa_modules modules_to_not_convert.extend(A ) # compatibility with peft lowerCAmelCase__ = load_in_abit lowerCAmelCase__ = load_in_abit lowerCAmelCase__ = get_parameter_device(A ) if model_device.type != "meta": # quantization of an already loaded model logger.warning( '''It is not recommended to quantize a loaded model. ''' '''The model should be instantiated under the `init_empty_weights` context manager.''' ) lowerCAmelCase__ = replace_with_bnb_layers(A , A , modules_to_not_convert=A ) # convert param to the right dtype lowerCAmelCase__ = bnb_quantization_config.torch_dtype for name, param in model.state_dict().items(): if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ): param.to(torch.floataa ) if param.dtype != torch.floataa: lowerCAmelCase__ = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' ) lowerCAmelCase__ = getattr(A , A , A ) if param is not None: param.to(torch.floataa ) elif torch.is_floating_point(A ): param.to(A ) if model_device.type == "cuda": # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda model.cuda(torch.cuda.current_device() ) torch.cuda.empty_cache() elif torch.cuda.is_available(): model.to(torch.cuda.current_device() ) else: raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' ) logger.info( F"""The model device type is {model_device.type}. However, cuda is needed for quantization.""" '''We move the model to cuda.''' ) return model elif weights_location is None: raise RuntimeError( F"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ ) else: with init_empty_weights(): lowerCAmelCase__ = replace_with_bnb_layers( A , A , modules_to_not_convert=A ) lowerCAmelCase__ = get_quantized_model_device_map( A , A , A , max_memory=A , no_split_module_classes=A , ) if offload_state_dict is None and device_map is not None and "disk" in device_map.values(): lowerCAmelCase__ = True lowerCAmelCase__ = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] ) load_checkpoint_in_model( A , A , A , dtype=bnb_quantization_config.torch_dtype , offload_folder=A , offload_state_dict=A , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , ) return dispatch_model(A , device_map=A , offload_dir=A ) def _snake_case ( A , A , A=None , A=None , A=None ) -> List[Any]: if device_map is None: if torch.cuda.is_available(): lowerCAmelCase__ = {'''''': torch.cuda.current_device()} else: raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' ) logger.info('''The device_map was not initialized.''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' ) if isinstance(A , A ): if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]: raise ValueError( '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or ''' '''\'sequential\'.''' ) lowerCAmelCase__ = {} special_dtypes.update( { name: bnb_quantization_config.torch_dtype for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.skip_modules ) } ) special_dtypes.update( { name: torch.floataa for name, _ in model.named_parameters() if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules ) } ) lowerCAmelCase__ = {} lowerCAmelCase__ = special_dtypes lowerCAmelCase__ = no_split_module_classes lowerCAmelCase__ = bnb_quantization_config.target_dtype # get max_memory for each device. if device_map != "sequential": lowerCAmelCase__ = get_balanced_memory( A , low_zero=(device_map == '''balanced_low_0''') , max_memory=A , **A , ) lowerCAmelCase__ = max_memory lowerCAmelCase__ = infer_auto_device_map(A , **A ) if isinstance(A , A ): # check if don't have any quantized module on the cpu lowerCAmelCase__ = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules lowerCAmelCase__ = { key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert } for device in ["cpu", "disk"]: if device in device_map_without_some_modules.values(): if bnb_quantization_config.load_in_abit: raise ValueError( ''' Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit the quantized model. If you want to dispatch the model on the CPU or the disk while keeping these modules in `torch_dtype`, you need to pass a custom `device_map` to `load_and_quantize_model`. Check https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk for more details. ''' ) else: logger.info( '''Some modules are are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' ) del device_map_without_some_modules return device_map def _snake_case ( A , A , A=None , A=None ) -> Any: if modules_to_not_convert is None: lowerCAmelCase__ = [] lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers( A , A , A , A ) if not has_been_replaced: logger.warning( '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.''' ''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.''' ''' Please double check your model architecture, or submit an issue on github if you think this is''' ''' a bug.''' ) return model def _snake_case ( A , A , A=None , A=None , ) -> Optional[Any]: lowerCAmelCase__ = False for name, module in model.named_children(): if current_key_name is None: lowerCAmelCase__ = [] current_key_name.append(A ) if isinstance(A , nn.Linear ) and name not in modules_to_not_convert: # Check if the current key is not in the `modules_to_not_convert` lowerCAmelCase__ = '''.'''.join(A ) lowerCAmelCase__ = True for key in modules_to_not_convert: if ( (key in current_key_name_str) and (key + "." in current_key_name_str) ) or key == current_key_name_str: lowerCAmelCase__ = False break if proceed: # Load bnb module with empty weight and replace ``nn.Linear` module if bnb_quantization_config.load_in_abit: lowerCAmelCase__ = bnb.nn.LinearabitLt( module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=A , threshold=bnb_quantization_config.llm_inta_threshold , ) elif bnb_quantization_config.load_in_abit: lowerCAmelCase__ = bnb.nn.Linearabit( module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , ) else: raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' ) lowerCAmelCase__ = module.weight.data if module.bias is not None: lowerCAmelCase__ = module.bias.data bnb_module.requires_grad_(A ) setattr(A , A , A ) lowerCAmelCase__ = True if len(list(module.children() ) ) > 0: lowerCAmelCase__ , lowerCAmelCase__ = _replace_with_bnb_layers( A , A , A , A ) lowerCAmelCase__ = has_been_replaced | _has_been_replaced # Remove the last key for recursion current_key_name.pop(-1 ) return model, has_been_replaced def _snake_case ( A ) -> Tuple: # Create a copy of the model with init_empty_weights(): lowerCAmelCase__ = deepcopy(A ) # this has 0 cost since it is done inside `init_empty_weights` context manager` lowerCAmelCase__ = find_tied_parameters(A ) # For compatibility with Accelerate < 0.18 if isinstance(A , A ): lowerCAmelCase__ = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() ) else: lowerCAmelCase__ = sum(A , [] ) lowerCAmelCase__ = len(A ) > 0 # Check if it is a base model lowerCAmelCase__ = False if hasattr(A , '''base_model_prefix''' ): lowerCAmelCase__ = not hasattr(A , model.base_model_prefix ) # Ignore this for base models (BertModel, GPT2Model, etc.) if (not has_tied_params) and is_base_model: return [] # otherwise they have an attached head lowerCAmelCase__ = list(model.named_children() ) lowerCAmelCase__ = [list_modules[-1][0]] # add last module together with tied weights lowerCAmelCase__ = set(A ) - set(A ) lowerCAmelCase__ = list(set(A ) ) + list(A ) # remove ".weight" from the keys lowerCAmelCase__ = ['''.weight''', '''.bias'''] lowerCAmelCase__ = [] for name in list_untouched: for name_to_remove in names_to_remove: if name_to_remove in name: lowerCAmelCase__ = name.replace(A , '''''' ) filtered_module_names.append(A ) return filtered_module_names def _snake_case ( A ) -> Optional[int]: for m in model.modules(): if isinstance(A , bnb.nn.Linearabit ): return True return False def _snake_case ( A ) -> Union[str, Any]: return next(parameter.parameters() ).device def _snake_case ( A , A , A , A , A , A , A ) -> Any: # if it is not quantized, we quantize and offload the quantized weights and the SCB stats if fpaa_statistics is None: set_module_tensor_to_device(A , A , 0 , dtype=A , value=A ) lowerCAmelCase__ = param_name lowerCAmelCase__ = model if "." in tensor_name: lowerCAmelCase__ = tensor_name.split('''.''' ) for split in splits[:-1]: lowerCAmelCase__ = getattr(A , A ) if new_module is None: raise ValueError(F"""{module} has no attribute {split}.""" ) lowerCAmelCase__ = new_module lowerCAmelCase__ = splits[-1] # offload weights lowerCAmelCase__ = False offload_weight(module._parameters[tensor_name] , A , A , index=A ) if hasattr(module._parameters[tensor_name] , '''SCB''' ): offload_weight( module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , A , index=A , ) else: offload_weight(A , A , A , index=A ) offload_weight(A , param_name.replace('''weight''' , '''SCB''' ) , A , index=A ) set_module_tensor_to_device(A , A , '''meta''' , dtype=A , value=torch.empty(*param.size() ) )
90
import math from collections.abc import Callable def _a ( lowercase__ : Callable[[float], float] , lowercase__ : float , lowercase__ : float ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : float = xa SCREAMING_SNAKE_CASE__ : float = xa while True: if x_n == x_na or function(lowercase__ ) == function(lowercase__ ): raise ZeroDivisionError('float division by zero, could not find root' ) SCREAMING_SNAKE_CASE__ : float = x_na - ( function(lowercase__ ) / ((function(lowercase__ ) - function(lowercase__ )) / (x_na - x_n)) ) if abs(x_na - x_na ) < 10**-5: return x_na SCREAMING_SNAKE_CASE__ : Dict = x_na SCREAMING_SNAKE_CASE__ : List[str] = x_na def _a ( lowercase__ : float ): '''simple docstring''' return math.pow(lowercase__ , 3 ) - (2 * x) - 5 if __name__ == "__main__": print(intersection(f, 3, 3.5))
85
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available _lowercase = { '''configuration_xlm''': ['''XLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XLMConfig''', '''XLMOnnxConfig'''], '''tokenization_xlm''': ['''XLMTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''XLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''XLMForMultipleChoice''', '''XLMForQuestionAnswering''', '''XLMForQuestionAnsweringSimple''', '''XLMForSequenceClassification''', '''XLMForTokenClassification''', '''XLMModel''', '''XLMPreTrainedModel''', '''XLMWithLMHeadModel''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase = [ '''TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFXLMForMultipleChoice''', '''TFXLMForQuestionAnsweringSimple''', '''TFXLMForSequenceClassification''', '''TFXLMForTokenClassification''', '''TFXLMMainLayer''', '''TFXLMModel''', '''TFXLMPreTrainedModel''', '''TFXLMWithLMHeadModel''', ] if TYPE_CHECKING: from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig from .tokenization_xlm import XLMTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) else: import sys _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
91
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class snake_case ( UpperCamelCase_ ): lowercase_ = ['image_processor', 'tokenizer'] lowercase_ = 'AutoImageProcessor' lowercase_ = 'AutoTokenizer' def __init__( self : List[Any] , a_ : int , a_ : Union[str, Any] )-> List[Any]: """simple docstring""" super().__init__(a_ , a_ ) SCREAMING_SNAKE_CASE__ : str = self.image_processor def __call__( self : Tuple , a_ : str=None , a_ : List[Any]=None , a_ : Optional[Any]=None , **a_ : Dict )-> Tuple: """simple docstring""" if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: SCREAMING_SNAKE_CASE__ : Any = self.tokenizer(a_ , return_tensors=a_ , **a_ ) if images is not None: SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processor(a_ , return_tensors=a_ , **a_ ) if text is not None and images is not None: SCREAMING_SNAKE_CASE__ : List[str] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ ) def __lowercase( self : Dict , *a_ : Any , **a_ : Any )-> List[Any]: """simple docstring""" return self.tokenizer.batch_decode(*a_ , **a_ ) def __lowercase( self : Dict , *a_ : Union[str, Any] , **a_ : Optional[int] )-> Dict: """simple docstring""" return self.tokenizer.decode(*a_ , **a_ ) @property def __lowercase( self : Any )-> Any: """simple docstring""" return ["input_ids", "attention_mask", "pixel_values"]
85
0
'''simple docstring''' import unittest import numpy as np from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch if is_vision_available(): from PIL import Image from transformers import MobileViTImageProcessor class __SCREAMING_SNAKE_CASE ( unittest.TestCase ): def __init__( self : Optional[int] , UpperCAmelCase__ : List[str] , UpperCAmelCase__ : str=7 , UpperCAmelCase__ : Tuple=3 , UpperCAmelCase__ : Optional[Any]=18 , UpperCAmelCase__ : Union[str, Any]=30 , UpperCAmelCase__ : List[Any]=400 , UpperCAmelCase__ : List[Any]=True , UpperCAmelCase__ : Optional[Any]=None , UpperCAmelCase__ : Union[str, Any]=True , UpperCAmelCase__ : List[str]=None , UpperCAmelCase__ : Dict=True , ): '''simple docstring''' lowercase : int =size if size is not None else {'''shortest_edge''': 20} lowercase : List[Any] =crop_size if crop_size is not None else {'''height''': 18, '''width''': 18} lowercase : Optional[Any] =parent lowercase : List[Any] =batch_size lowercase : str =num_channels lowercase : Dict =image_size lowercase : List[Any] =min_resolution lowercase : Dict =max_resolution lowercase : Dict =do_resize lowercase : Union[str, Any] =size lowercase : Dict =do_center_crop lowercase : Optional[Any] =crop_size lowercase : Any =do_flip_channel_order def lowerCamelCase_ ( self : List[Any] ): '''simple docstring''' return { "do_resize": self.do_resize, "size": self.size, "do_center_crop": self.do_center_crop, "crop_size": self.crop_size, "do_flip_channel_order": self.do_flip_channel_order, } @require_torch @require_vision class __SCREAMING_SNAKE_CASE ( lowercase__ , unittest.TestCase ): lowerCamelCase_ = MobileViTImageProcessor if is_vision_available() else None def lowerCamelCase_ ( self : List[str] ): '''simple docstring''' lowercase : List[str] =MobileViTImageProcessingTester(self ) @property def lowerCamelCase_ ( self : str ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Tuple =self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_resize''' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , '''size''' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_center_crop''' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , '''center_crop''' ) ) self.assertTrue(hasattr(UpperCAmelCase__ , '''do_flip_channel_order''' ) ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' lowercase : Dict =self.image_processing_class.from_dict(self.image_processor_dict ) self.assertEqual(image_processor.size , {'''shortest_edge''': 20} ) self.assertEqual(image_processor.crop_size , {'''height''': 18, '''width''': 18} ) lowercase : str =self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 ) self.assertEqual(image_processor.size , {'''shortest_edge''': 42} ) self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} ) def lowerCamelCase_ ( self : Union[str, Any] ): '''simple docstring''' pass def lowerCamelCase_ ( self : Dict ): '''simple docstring''' # Initialize image_processing lowercase : Union[str, Any] =self.image_processing_class(**self.image_processor_dict ) # create random PIL images lowercase : List[str] =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , Image.Image ) # Test not batched input lowercase : Tuple =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowercase : Optional[int] =image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowerCamelCase_ ( self : Any ): '''simple docstring''' # Initialize image_processing lowercase : Union[str, Any] =self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors lowercase : Optional[Any] =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , numpify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , np.ndarray ) # Test not batched input lowercase : Optional[int] =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowercase : Tuple =image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) def lowerCamelCase_ ( self : str ): '''simple docstring''' # Initialize image_processing lowercase : Tuple =self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors lowercase : Any =prepare_image_inputs(self.image_processor_tester , equal_resolution=UpperCAmelCase__ , torchify=UpperCAmelCase__ ) for image in image_inputs: self.assertIsInstance(UpperCAmelCase__ , torch.Tensor ) # Test not batched input lowercase : Tuple =image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( 1, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , ) # Test batched lowercase : Optional[int] =image_processing(UpperCAmelCase__ , return_tensors='''pt''' ).pixel_values self.assertEqual( encoded_images.shape , ( self.image_processor_tester.batch_size, self.image_processor_tester.num_channels, self.image_processor_tester.crop_size['''height'''], self.image_processor_tester.crop_size['''width'''], ) , )
92
import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def _a ( lowercase__ : int = 3 ): '''simple docstring''' if isinstance(lowercase__ , lowercase__ ): raise TypeError('number of qubits must be a integer.' ) if number_of_qubits <= 0: raise ValueError('number of qubits must be > 0.' ) if math.floor(lowercase__ ) != number_of_qubits: raise ValueError('number of qubits must be exact integer.' ) if number_of_qubits > 10: raise ValueError('number of qubits too large to simulate(>10).' ) SCREAMING_SNAKE_CASE__ : Tuple = QuantumRegister(lowercase__ , 'qr' ) SCREAMING_SNAKE_CASE__ : int = ClassicalRegister(lowercase__ , 'cr' ) SCREAMING_SNAKE_CASE__ : Tuple = QuantumCircuit(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE__ : Tuple = number_of_qubits for i in range(lowercase__ ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(lowercase__ ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , lowercase__ , lowercase__ ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(lowercase__ , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(lowercase__ , lowercase__ ) # simulate with 10000 shots SCREAMING_SNAKE_CASE__ : Optional[int] = Aer.get_backend('qasm_simulator' ) SCREAMING_SNAKE_CASE__ : Tuple = execute(lowercase__ , lowercase__ , shots=1_00_00 ) return job.result().get_counts(lowercase__ ) if __name__ == "__main__": print( F"""Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}""" )
85
0
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Any: """simple docstring""" lowerCAmelCase__ :Tuple = TapasConfig.from_json_file(_SCREAMING_SNAKE_CASE ) # set absolute/relative position embeddings parameter lowerCAmelCase__ :int = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": lowerCAmelCase__ :Optional[int] = TapasForQuestionAnswering(config=_SCREAMING_SNAKE_CASE ) elif task == "WTQ": # run_task_main.py hparams lowerCAmelCase__ :Optional[int] = 4 lowerCAmelCase__ :Tuple = True # hparam_utils.py hparams lowerCAmelCase__ :Optional[int] = 0.6_6_4_6_9_4 lowerCAmelCase__ :Tuple = 0.2_0_7_9_5_1 lowerCAmelCase__ :List[str] = 0.1_2_1_1_9_4 lowerCAmelCase__ :Optional[Any] = True lowerCAmelCase__ :Dict = True lowerCAmelCase__ :Union[str, Any] = False lowerCAmelCase__ :List[str] = 0.0_3_5_2_5_1_3 lowerCAmelCase__ :Optional[Any] = TapasForQuestionAnswering(config=_SCREAMING_SNAKE_CASE ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams lowerCAmelCase__ :Optional[Any] = 4 lowerCAmelCase__ :str = False # hparam_utils.py hparams lowerCAmelCase__ :Any = 3_6.4_5_1_9 lowerCAmelCase__ :Tuple = 0.9_0_3_4_2_1 lowerCAmelCase__ :Optional[int] = 2_2_2.0_8_8 lowerCAmelCase__ :int = True lowerCAmelCase__ :List[str] = True lowerCAmelCase__ :Optional[Any] = True lowerCAmelCase__ :List[str] = 0.7_6_3_1_4_1 lowerCAmelCase__ :Tuple = TapasForQuestionAnswering(config=_SCREAMING_SNAKE_CASE ) elif task == "TABFACT": lowerCAmelCase__ :List[str] = TapasForSequenceClassification(config=_SCREAMING_SNAKE_CASE ) elif task == "MLM": lowerCAmelCase__ :Optional[int] = TapasForMaskedLM(config=_SCREAMING_SNAKE_CASE ) elif task == "INTERMEDIATE_PRETRAINING": lowerCAmelCase__ :Dict = TapasModel(config=_SCREAMING_SNAKE_CASE ) else: raise ValueError(F"Task {task} not supported." ) print(F"Building PyTorch model from configuration: {config}" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) # Save pytorch-model (weights and configuration) print(F"Save PyTorch model to {pytorch_dump_path}" ) model.save_pretrained(_SCREAMING_SNAKE_CASE ) # Save tokenizer files print(F"Save tokenizer files to {pytorch_dump_path}" ) lowerCAmelCase__ :str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + 'vocab.txt' , model_max_length=512 ) tokenizer.save_pretrained(_SCREAMING_SNAKE_CASE ) print('Used relative position embeddings:' , model.config.reset_position_index_per_cell ) if __name__ == "__main__": __A = argparse.ArgumentParser() # Required parameters parser.add_argument( """--task""", default="""SQA""", type=str, help="""Model task for which to convert a checkpoint. Defaults to SQA.""" ) parser.add_argument( """--reset_position_index_per_cell""", default=False, action="""store_true""", help="""Whether to use relative position embeddings or not. Defaults to True.""", ) parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--tapas_config_file""", default=None, type=str, required=True, help=( """The config json file corresponding to the pre-trained TAPAS model. \n""" """This specifies the model architecture.""" ), ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) __A = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
93
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="%(message)s") def _a ( lowercase__ : np.ndarray ): '''simple docstring''' return input_array.reshape((input_array.size, 1) ) def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray , lowercase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = np.nan for i in range(lowercase__ ): SCREAMING_SNAKE_CASE__ : int = features[:, labels == i] SCREAMING_SNAKE_CASE__ : int = data.mean(1 ) # Centralize the data of class i SCREAMING_SNAKE_CASE__ : Optional[Any] = data - column_reshape(lowercase__ ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(lowercase__ , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) SCREAMING_SNAKE_CASE__ : Any = np.dot(lowercase__ , centered_data.T ) return covariance_sum / features.shape[1] def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray , lowercase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = features.mean(1 ) SCREAMING_SNAKE_CASE__ : List[str] = np.nan for i in range(lowercase__ ): SCREAMING_SNAKE_CASE__ : Tuple = features[:, labels == i] SCREAMING_SNAKE_CASE__ : int = data.shape[1] SCREAMING_SNAKE_CASE__ : List[Any] = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(lowercase__ ) - column_reshape(lowercase__ ) , (column_reshape(lowercase__ ) - column_reshape(lowercase__ )).T , ) else: # If covariance_sum is np.nan (i.e. first loop) SCREAMING_SNAKE_CASE__ : str = device_data * np.dot( column_reshape(lowercase__ ) - column_reshape(lowercase__ ) , (column_reshape(lowercase__ ) - column_reshape(lowercase__ )).T , ) return covariance_sum / features.shape[1] def _a ( lowercase__ : np.ndarray , lowercase__ : int ): '''simple docstring''' if features.any(): SCREAMING_SNAKE_CASE__ : Any = features.mean(1 ) # Center the dataset SCREAMING_SNAKE_CASE__ : Optional[Any] = features - np.reshape(lowercase__ , (data_mean.size, 1) ) SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(lowercase__ , centered_data.T ) / features.shape[1] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = np.linalg.eigh(lowercase__ ) # Take all the columns in the reverse order (-1), and then takes only the first SCREAMING_SNAKE_CASE__ : List[Any] = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.dot(filtered_eigenvectors.T , lowercase__ ) logging.info('Principal Component Analysis computed' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=lowercase__ ) logging.error('Dataset empty' ) raise AssertionError def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray , lowercase__ : int , lowercase__ : int ): '''simple docstring''' assert classes > dimensions # Check if features have been already loaded if features.any: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = eigh( covariance_between_classes(lowercase__ , lowercase__ , lowercase__ ) , covariance_within_classes(lowercase__ , lowercase__ , lowercase__ ) , ) SCREAMING_SNAKE_CASE__ : Tuple = eigenvectors[:, ::-1][:, :dimensions] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = np.linalg.svd(lowercase__ ) SCREAMING_SNAKE_CASE__ : List[Any] = svd_matrix[:, 0:dimensions] SCREAMING_SNAKE_CASE__ : int = np.dot(filtered_svd_matrix.T , lowercase__ ) logging.info('Linear Discriminant Analysis computed' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=lowercase__ ) logging.error('Dataset empty' ) raise AssertionError def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) SCREAMING_SNAKE_CASE__ : Tuple = np.array([0, 0, 0, 1, 1] ) SCREAMING_SNAKE_CASE__ : str = 2 SCREAMING_SNAKE_CASE__ : Dict = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(lowercase__ ) as error_info: SCREAMING_SNAKE_CASE__ : Optional[int] = linear_discriminant_analysis( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) if isinstance(lowercase__ , np.ndarray ): raise AssertionError( 'Did not raise AssertionError for dimensions > classes' ) assert error_info.type is AssertionError def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) SCREAMING_SNAKE_CASE__ : List[str] = 2 SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] ) with pytest.raises(lowercase__ ) as error_info: SCREAMING_SNAKE_CASE__ : int = principal_component_analysis(lowercase__ , lowercase__ ) if not np.allclose(lowercase__ , lowercase__ ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
85
0
'''simple docstring''' import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import CLIPSegProcessor, ViTImageProcessor @require_vision class UpperCAmelCase_ ( unittest.TestCase ): """simple docstring""" def A__ ( self : Union[str, Any] ) -> Tuple: '''simple docstring''' lowercase : Dict =tempfile.mkdtemp() # fmt: off lowercase : str =['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>'''] # fmt: on lowercase : Any =dict(zip(UpperCAmelCase , range(len(UpperCAmelCase ) ) ) ) lowercase : str =['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', ''''''] lowercase : List[Any] ={'''unk_token''': '''<unk>'''} lowercase : Any =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] ) lowercase : Dict =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] ) with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write(json.dumps(UpperCAmelCase ) + '''\n''' ) with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp: fp.write('''\n'''.join(UpperCAmelCase ) ) lowercase : Dict ={ '''do_resize''': True, '''size''': 20, '''do_center_crop''': True, '''crop_size''': 18, '''do_normalize''': True, '''image_mean''': [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3], '''image_std''': [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1], } lowercase : int =os.path.join(self.tmpdirname , UpperCAmelCase ) with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp: json.dump(UpperCAmelCase , UpperCAmelCase ) def A__ ( self : Dict , **UpperCAmelCase : List[str] ) -> Union[str, Any]: '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def A__ ( self : Optional[Any] , **UpperCAmelCase : Tuple ) -> List[Any]: '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def A__ ( self : Tuple , **UpperCAmelCase : Dict ) -> int: '''simple docstring''' return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCAmelCase ) def A__ ( self : List[Any] ) -> Any: '''simple docstring''' shutil.rmtree(self.tmpdirname ) def A__ ( self : Tuple ) -> Optional[Any]: '''simple docstring''' lowercase : Dict =[np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )] lowercase : str =[Image.fromarray(np.moveaxis(UpperCAmelCase , 0 , -1 ) ) for x in image_inputs] return image_inputs def A__ ( self : int ) -> List[Any]: '''simple docstring''' lowercase : str =self.get_tokenizer() lowercase : List[str] =self.get_rust_tokenizer() lowercase : Union[str, Any] =self.get_image_processor() lowercase : str =CLIPSegProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) processor_slow.save_pretrained(self.tmpdirname ) lowercase : List[Any] =CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCAmelCase ) lowercase : List[Any] =CLIPSegProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) processor_fast.save_pretrained(self.tmpdirname ) lowercase : int =CLIPSegProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , UpperCAmelCase ) self.assertIsInstance(processor_fast.tokenizer , UpperCAmelCase ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , UpperCAmelCase ) self.assertIsInstance(processor_fast.image_processor , UpperCAmelCase ) def A__ ( self : Union[str, Any] ) -> int: '''simple docstring''' lowercase : Optional[int] =CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) lowercase : Dict =self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' ) lowercase : Dict =self.get_image_processor(do_normalize=UpperCAmelCase , padding_value=1.0 ) lowercase : List[str] =CLIPSegProcessor.from_pretrained( self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=UpperCAmelCase , padding_value=1.0 ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , UpperCAmelCase ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , UpperCAmelCase ) def A__ ( self : Tuple ) -> Optional[Any]: '''simple docstring''' lowercase : str =self.get_image_processor() lowercase : Optional[int] =self.get_tokenizer() lowercase : str =CLIPSegProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) lowercase : Union[str, Any] =self.prepare_image_inputs() lowercase : int =image_processor(UpperCAmelCase , return_tensors='''np''' ) lowercase : Optional[int] =processor(images=UpperCAmelCase , return_tensors='''np''' ) for key in input_feat_extract.keys(): self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 ) def A__ ( self : Any ) -> List[Any]: '''simple docstring''' lowercase : Optional[int] =self.get_image_processor() lowercase : Dict =self.get_tokenizer() lowercase : List[str] =CLIPSegProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) lowercase : Any ='''lower newer''' lowercase : int =processor(text=UpperCAmelCase ) lowercase : str =tokenizer(UpperCAmelCase ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key] , encoded_processor[key] ) def A__ ( self : List[str] ) -> List[Any]: '''simple docstring''' lowercase : Optional[int] =self.get_image_processor() lowercase : Union[str, Any] =self.get_tokenizer() lowercase : Any =CLIPSegProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) lowercase : Optional[int] ='''lower newer''' lowercase : Union[str, Any] =self.prepare_image_inputs() lowercase : Union[str, Any] =processor(text=UpperCAmelCase , images=UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase ): processor() def A__ ( self : Optional[int] ) -> Tuple: '''simple docstring''' lowercase : int =self.get_image_processor() lowercase : List[Any] =self.get_tokenizer() lowercase : List[Any] =CLIPSegProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) lowercase : str =self.prepare_image_inputs() lowercase : int =self.prepare_image_inputs() lowercase : Union[str, Any] =processor(images=UpperCAmelCase , visual_prompt=UpperCAmelCase ) self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] ) # test if it raises when no input is passed with pytest.raises(UpperCAmelCase ): processor() def A__ ( self : Optional[Any] ) -> Dict: '''simple docstring''' lowercase : Dict =self.get_image_processor() lowercase : Optional[Any] =self.get_tokenizer() lowercase : Union[str, Any] =CLIPSegProcessor(tokenizer=UpperCAmelCase , image_processor=UpperCAmelCase ) lowercase : Union[str, Any] =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] lowercase : Union[str, Any] =processor.batch_decode(UpperCAmelCase ) lowercase : str =tokenizer.batch_decode(UpperCAmelCase ) self.assertListEqual(UpperCAmelCase , UpperCAmelCase )
94
import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) SCREAMING_SNAKE_CASE__ : Optional[int] = logging.getLogger(__name__) SCREAMING_SNAKE_CASE__ : List[Any] = "Hello world! cécé herlolip" SCREAMING_SNAKE_CASE__ : Dict = namedtuple( "BertAbsConfig", [ "temp_dir", "large", "use_bert_emb", "finetune_bert", "encoder", "share_emb", "max_pos", "enc_layers", "enc_hidden_size", "enc_heads", "enc_ff_size", "enc_dropout", "dec_layers", "dec_hidden_size", "dec_heads", "dec_ff_size", "dec_dropout", ], ) def _a ( lowercase__ : List[str] , lowercase__ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = BertAbsConfig( temp_dir='.' , finetune_bert=lowercase__ , large=lowercase__ , share_emb=lowercase__ , use_bert_emb=lowercase__ , encoder='bert' , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.load(lowercase__ , lambda lowercase__ , lowercase__ : storage ) SCREAMING_SNAKE_CASE__ : Any = AbsSummarizer(lowercase__ , torch.device('cpu' ) , lowercase__ ) original.eval() SCREAMING_SNAKE_CASE__ : List[Any] = BertAbsSummarizer(lowercase__ , torch.device('cpu' ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info('convert the model' ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info('Make sure that the models\' outputs are identical' ) SCREAMING_SNAKE_CASE__ : Any = BertTokenizer.from_pretrained('bert-base-uncased' ) # prepare the model inputs SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.encode('This is sample éàalj\'-.' ) encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowercase__ )) ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor(lowercase__ ).unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode('This is sample 3 éàalj\'-.' ) decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowercase__ )) ) SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor(lowercase__ ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass SCREAMING_SNAKE_CASE__ : int = encoder_input_ids SCREAMING_SNAKE_CASE__ : Any = decoder_input_ids SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : Dict = None SCREAMING_SNAKE_CASE__ : str = None SCREAMING_SNAKE_CASE__ : List[str] = None SCREAMING_SNAKE_CASE__ : Optional[Any] = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. # We make sure that the outputs of the full stack are identical SCREAMING_SNAKE_CASE__ : Optional[Any] = original(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )[0] SCREAMING_SNAKE_CASE__ : Optional[int] = original.generator(lowercase__ ) SCREAMING_SNAKE_CASE__ : Tuple = new_model( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )[0] SCREAMING_SNAKE_CASE__ : List[Any] = new_model.generator(lowercase__ ) SCREAMING_SNAKE_CASE__ : Tuple = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print('Maximum absolute difference beween weights: {:.2f}'.format(lowercase__ ) ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print('Maximum absolute difference beween weights: {:.2f}'.format(lowercase__ ) ) SCREAMING_SNAKE_CASE__ : List[Any] = torch.allclose(lowercase__ , lowercase__ , atol=1E-3 ) if are_identical: logging.info('all weights are equal up to 1e-3' ) else: raise ValueError('the weights are different. The new model is likely different from the original one.' ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info('saving the model\'s state dictionary' ) torch.save( new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser() parser.add_argument( "--bertabs_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model.", ) SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
85
0
"""simple docstring""" def snake_case ( A__ = 50 ): UpperCAmelCase_ : Any = [1] * (length + 1) for row_length in range(length + 1 ): for tile_length in range(2 ,5 ): for tile_start in range(row_length - tile_length + 1 ): ways_number[row_length] += ways_number[ row_length - tile_start - tile_length ] return ways_number[length] if __name__ == "__main__": print(f'{solution() = }')
95
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case : def __init__( self : Tuple , a_ : int , a_ : Optional[int]=3 , a_ : Tuple=32 , a_ : Any=3 , a_ : Tuple=10 , a_ : Optional[int]=[10, 20, 30, 40] , a_ : List[Any]=[1, 1, 2, 1] , a_ : int=True , a_ : Optional[Any]=True , a_ : Any="relu" , a_ : int=3 , a_ : List[Any]=None , )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = parent SCREAMING_SNAKE_CASE__ : Optional[int] = batch_size SCREAMING_SNAKE_CASE__ : int = image_size SCREAMING_SNAKE_CASE__ : Tuple = num_channels SCREAMING_SNAKE_CASE__ : Tuple = embeddings_size SCREAMING_SNAKE_CASE__ : str = hidden_sizes SCREAMING_SNAKE_CASE__ : Optional[int] = depths SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_labels SCREAMING_SNAKE_CASE__ : Dict = hidden_act SCREAMING_SNAKE_CASE__ : Tuple = num_labels SCREAMING_SNAKE_CASE__ : List[Any] = scope SCREAMING_SNAKE_CASE__ : str = len(a_ ) def __lowercase( self : Union[str, Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : Any = None if self.use_labels: SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE__ : Tuple = self.get_config() return config, pixel_values, labels def __lowercase( self : str )-> str: """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def __lowercase( self : List[str] , a_ : int , a_ : Any , a_ : Optional[Any] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = TFRegNetModel(config=a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , training=a_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __lowercase( self : Union[str, Any] , a_ : Dict , a_ : int , a_ : Optional[Any] )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.num_labels SCREAMING_SNAKE_CASE__ : Tuple = TFRegNetForImageClassification(a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ , training=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowercase( self : List[str] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = config_and_inputs SCREAMING_SNAKE_CASE__ : Optional[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () lowercase_ = ( {'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification} if is_tf_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False def __lowercase( self : int )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = TFRegNetModelTester(self ) SCREAMING_SNAKE_CASE__ : int = ConfigTester(self , config_class=a_ , has_text_modality=a_ ) def __lowercase( self : List[Any] )-> Tuple: """simple docstring""" return @unittest.skip(reason='RegNet does not use inputs_embeds' ) def __lowercase( self : str )-> Optional[int]: """simple docstring""" pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) @slow def __lowercase( self : Any )-> List[Any]: """simple docstring""" super().test_keras_fit() @unittest.skip(reason='RegNet does not support input and output embeddings' ) def __lowercase( self : Any )-> List[Any]: """simple docstring""" pass def __lowercase( self : Tuple )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : List[Any] = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , a_ ) def __lowercase( self : str )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def __lowercase( self : List[Any] )-> Optional[Any]: """simple docstring""" def check_hidden_states_output(a_ : int , a_ : Union[str, Any] , a_ : Tuple ): SCREAMING_SNAKE_CASE__ : Any = model_class(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**self._prepare_for_class(a_ , a_ ) , training=a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(a_ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : Dict = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: SCREAMING_SNAKE_CASE__ : List[Any] = layer_type SCREAMING_SNAKE_CASE__ : Union[str, Any] = True check_hidden_states_output(a_ , a_ , a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE__ : int = True check_hidden_states_output(a_ , a_ , a_ ) def __lowercase( self : Optional[int] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(a_ : str , a_ : Tuple , a_ : Optional[int] , a_ : Union[str, Any]={} ): SCREAMING_SNAKE_CASE__ : int = model(a_ , return_dict=a_ , **a_ ) SCREAMING_SNAKE_CASE__ : str = model(a_ , return_dict=a_ , **a_ ).to_tuple() def recursive_check(a_ : List[Any] , a_ : int ): if isinstance(a_ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(a_ , a_ ): recursive_check(a_ , a_ ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(a_ , a_ ) ) , msg=( 'Tuple and dict output are not equal. Difference:' F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}''' ) , ) recursive_check(a_ , a_ ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ ) SCREAMING_SNAKE_CASE__ : int = self._prepare_for_class(a_ , a_ ) SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(a_ , a_ ) check_equivalence(a_ , a_ , a_ ) SCREAMING_SNAKE_CASE__ : List[str] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) check_equivalence(a_ , a_ , a_ ) SCREAMING_SNAKE_CASE__ : str = self._prepare_for_class(a_ , a_ ) SCREAMING_SNAKE_CASE__ : List[str] = self._prepare_for_class(a_ , a_ ) check_equivalence(a_ , a_ , a_ , {'output_hidden_states': True} ) SCREAMING_SNAKE_CASE__ : int = self._prepare_for_class(a_ , a_ , return_labels=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) check_equivalence(a_ , a_ , a_ , {'output_hidden_states': True} ) def __lowercase( self : str )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) @slow def __lowercase( self : Any )-> List[str]: """simple docstring""" for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Optional[int] = TFRegNetModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class snake_case ( unittest.TestCase ): @cached_property def __lowercase( self : List[Any] )-> int: """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __lowercase( self : Any )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) SCREAMING_SNAKE_CASE__ : List[Any] = self.default_image_processor SCREAMING_SNAKE_CASE__ : Any = prepare_img() SCREAMING_SNAKE_CASE__ : str = image_processor(images=a_ , return_tensors='tf' ) # forward pass SCREAMING_SNAKE_CASE__ : Tuple = model(**a_ , training=a_ ) # verify the logits SCREAMING_SNAKE_CASE__ : Optional[int] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , a_ ) SCREAMING_SNAKE_CASE__ : Any = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , a_ , atol=1e-4 )
85
0
"""simple docstring""" def a ( __UpperCAmelCase : int = 2_0_0_0_0_0_0 ) -> int: __magic_name__: str = [0 for i in range(n + 1 )] __magic_name__: Tuple = 1 __magic_name__: Dict = 1 for i in range(2 , int(n**0.5 ) + 1 ): if primality_list[i] == 0: for j in range(i * i , n + 1 , __UpperCAmelCase ): __magic_name__: Optional[int] = 1 __magic_name__: Any = 0 for i in range(__UpperCAmelCase ): if primality_list[i] == 0: sum_of_primes += i return sum_of_primes if __name__ == "__main__": print(f'''{solution() = }''')
96
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE__ : Optional[Any] = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[Any] = ["FNetTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[str] = ["FNetTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Tuple = [ "FNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FNetForMaskedLM", "FNetForMultipleChoice", "FNetForNextSentencePrediction", "FNetForPreTraining", "FNetForQuestionAnswering", "FNetForSequenceClassification", "FNetForTokenClassification", "FNetLayer", "FNetModel", "FNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
0
import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __a = logging.get_logger(__name__) __a = { 'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json', 'BridgeTower/bridgetower-base-itm-mlm': ( 'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json' ), } class lowercase__( UpperCAmelCase ): """simple docstring""" a :Optional[Any] = 'bridgetower_vision_model' def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str=7_6_8 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1_2 , SCREAMING_SNAKE_CASE_ : str=3 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=1_6 , SCREAMING_SNAKE_CASE_ : Optional[int]=2_8_8 , SCREAMING_SNAKE_CASE_ : Optional[int]=1 , SCREAMING_SNAKE_CASE_ : Tuple=1e-05 , SCREAMING_SNAKE_CASE_ : Optional[Any]=False , SCREAMING_SNAKE_CASE_ : List[str]=True , SCREAMING_SNAKE_CASE_ : int=False , **SCREAMING_SNAKE_CASE_ : int , ) -> Dict: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_channels lowercase_ = patch_size lowercase_ = image_size lowercase_ = initializer_factor lowercase_ = layer_norm_eps lowercase_ = stop_gradient lowercase_ = share_layernorm lowercase_ = remove_last_layer @classmethod def _lowercase ( cls : Any , SCREAMING_SNAKE_CASE_ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE_ : List[Any] ) -> "PretrainedConfig": lowercase_ , lowercase_ = cls.get_config_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if config_dict.get('''model_type''' ) == "bridgetower": lowercase_ = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase ): """simple docstring""" a :Tuple = 'bridgetower_text_model' def __init__( self : Any , SCREAMING_SNAKE_CASE_ : List[str]=5_0_2_6_5 , SCREAMING_SNAKE_CASE_ : List[Any]=7_6_8 , SCREAMING_SNAKE_CASE_ : Any=1_2 , SCREAMING_SNAKE_CASE_ : Dict=1_2 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=3_0_7_2 , SCREAMING_SNAKE_CASE_ : Tuple="gelu" , SCREAMING_SNAKE_CASE_ : Tuple=0.1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=0.1 , SCREAMING_SNAKE_CASE_ : Tuple=5_1_4 , SCREAMING_SNAKE_CASE_ : List[str]=1 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1e-05 , SCREAMING_SNAKE_CASE_ : Optional[Any]=1 , SCREAMING_SNAKE_CASE_ : Dict=0 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=2 , SCREAMING_SNAKE_CASE_ : Optional[int]="absolute" , SCREAMING_SNAKE_CASE_ : Tuple=True , **SCREAMING_SNAKE_CASE_ : Optional[int] , ) -> str: super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = vocab_size lowercase_ = hidden_size lowercase_ = num_hidden_layers lowercase_ = num_attention_heads lowercase_ = hidden_act lowercase_ = initializer_factor lowercase_ = intermediate_size lowercase_ = hidden_dropout_prob lowercase_ = attention_probs_dropout_prob lowercase_ = max_position_embeddings lowercase_ = type_vocab_size lowercase_ = layer_norm_eps lowercase_ = position_embedding_type lowercase_ = use_cache lowercase_ = pad_token_id lowercase_ = bos_token_id lowercase_ = eos_token_id @classmethod def _lowercase ( cls : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, os.PathLike] , **SCREAMING_SNAKE_CASE_ : Any ) -> "PretrainedConfig": lowercase_ , lowercase_ = cls.get_config_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) if config_dict.get('''model_type''' ) == "bridgetower": lowercase_ = config_dict['''text_config'''] if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type: logger.warning( f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type ''' f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' ) return cls.from_dict(SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) class lowercase__( UpperCAmelCase ): """simple docstring""" a :List[Any] = 'bridgetower' def __init__( self : Dict , SCREAMING_SNAKE_CASE_ : str=True , SCREAMING_SNAKE_CASE_ : Tuple="gelu" , SCREAMING_SNAKE_CASE_ : int=7_6_8 , SCREAMING_SNAKE_CASE_ : Any=1 , SCREAMING_SNAKE_CASE_ : Any=1e-05 , SCREAMING_SNAKE_CASE_ : List[Any]=False , SCREAMING_SNAKE_CASE_ : Any="add" , SCREAMING_SNAKE_CASE_ : List[Any]=1_2 , SCREAMING_SNAKE_CASE_ : List[Any]=6 , SCREAMING_SNAKE_CASE_ : Union[str, Any]=False , SCREAMING_SNAKE_CASE_ : List[Any]=False , SCREAMING_SNAKE_CASE_ : Union[str, Any]=None , SCREAMING_SNAKE_CASE_ : str=None , **SCREAMING_SNAKE_CASE_ : Dict , ) -> Union[str, Any]: # TODO: remove this once the Hub files are updated. lowercase_ = kwargs.pop('''text_config_dict''' , SCREAMING_SNAKE_CASE_ ) lowercase_ = kwargs.pop('''vision_config_dict''' , SCREAMING_SNAKE_CASE_ ) super().__init__(**SCREAMING_SNAKE_CASE_ ) lowercase_ = share_cross_modal_transformer_layers lowercase_ = hidden_act lowercase_ = hidden_size lowercase_ = initializer_factor lowercase_ = layer_norm_eps lowercase_ = share_link_tower_layers lowercase_ = link_tower_type lowercase_ = num_attention_heads lowercase_ = num_hidden_layers lowercase_ = tie_word_embeddings lowercase_ = init_layernorm_from_vision_encoder if text_config is None: lowercase_ = {} logger.info('''`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.''' ) if vision_config is None: lowercase_ = {} logger.info('''`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.''' ) lowercase_ = BridgeTowerTextConfig(**SCREAMING_SNAKE_CASE_ ) lowercase_ = BridgeTowerVisionConfig(**SCREAMING_SNAKE_CASE_ ) @classmethod def _lowercase ( cls : str , SCREAMING_SNAKE_CASE_ : BridgeTowerTextConfig , SCREAMING_SNAKE_CASE_ : BridgeTowerVisionConfig , **SCREAMING_SNAKE_CASE_ : Optional[int] ) -> List[str]: return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE_ ) def _lowercase ( self : Optional[int] ) -> List[Any]: lowercase_ = copy.deepcopy(self.__dict__ ) lowercase_ = self.text_config.to_dict() lowercase_ = self.vision_config.to_dict() lowercase_ = self.__class__.model_type return output
97
def _a ( lowercase__ : int , lowercase__ : list ): '''simple docstring''' _enforce_args(lowercase__ , lowercase__ ) if n == 0: return 0 SCREAMING_SNAKE_CASE__ : str = float('-inf' ) for i in range(1 , n + 1 ): SCREAMING_SNAKE_CASE__ : int = max( lowercase__ , prices[i - 1] + naive_cut_rod_recursive(n - i , lowercase__ ) ) return max_revue def _a ( lowercase__ : int , lowercase__ : list ): '''simple docstring''' _enforce_args(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE__ : str = [float('-inf' ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(lowercase__ , lowercase__ , lowercase__ ) def _a ( lowercase__ : int , lowercase__ : list , lowercase__ : list ): '''simple docstring''' if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: SCREAMING_SNAKE_CASE__ : List[str] = float('-inf' ) for i in range(1 , n + 1 ): SCREAMING_SNAKE_CASE__ : Any = max( lowercase__ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , lowercase__ , lowercase__ ) , ) SCREAMING_SNAKE_CASE__ : Tuple = max_revenue return max_rev[n] def _a ( lowercase__ : int , lowercase__ : list ): '''simple docstring''' _enforce_args(lowercase__ , lowercase__ ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. SCREAMING_SNAKE_CASE__ : Optional[int] = [float('-inf' ) for _ in range(n + 1 )] SCREAMING_SNAKE_CASE__ : int = 0 for i in range(1 , n + 1 ): SCREAMING_SNAKE_CASE__ : Optional[Any] = max_rev[i] for j in range(1 , i + 1 ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = max(lowercase__ , prices[j - 1] + max_rev[i - j] ) SCREAMING_SNAKE_CASE__ : Dict = max_revenue_i return max_rev[n] def _a ( lowercase__ : int , lowercase__ : list ): '''simple docstring''' if n < 0: SCREAMING_SNAKE_CASE__ : Tuple = f'''n must be greater than or equal to 0. Got n = {n}''' raise ValueError(lowercase__ ) if n > len(lowercase__ ): SCREAMING_SNAKE_CASE__ : Tuple = ( 'Each integral piece of rod must have a corresponding price. ' f'''Got n = {n} but length of prices = {len(lowercase__ )}''' ) raise ValueError(lowercase__ ) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = [6, 10, 12, 15, 20, 23] SCREAMING_SNAKE_CASE__ : Optional[int] = len(lowercase__ ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. SCREAMING_SNAKE_CASE__ : Optional[Any] = 36 SCREAMING_SNAKE_CASE__ : Tuple = top_down_cut_rod(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = bottom_up_cut_rod(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE__ : List[str] = naive_cut_rod_recursive(lowercase__ , lowercase__ ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
85
0
'''simple docstring''' import logging import sys from dataclasses import dataclass, field from typing import Any, Dict, List, Optional, Union import librosa import torch from datasets import DatasetDict, load_dataset from packaging import version from torch import nn from transformers import ( HfArgumentParser, Trainer, TrainingArguments, WavaVecaConfig, WavaVecaFeatureExtractor, WavaVecaForPreTraining, is_apex_available, trainer_utils, ) from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices if is_apex_available(): from apex import amp if version.parse(version.parse(torch.__version__).base_version) >= version.parse('1.6'): lowercase__ : Any = True from torch.cuda.amp import autocast lowercase__ : List[str] = logging.getLogger(__name__) @dataclass class __lowerCAmelCase : """simple docstring""" _snake_case : str = field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} ) _snake_case : Optional[str] = field( default=__magic_name__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , ) _snake_case : Optional[bool] = field( default=__magic_name__ , metadata={'help': 'Whether to freeze the feature extractor layers of the model.'} ) _snake_case : Optional[bool] = field( default=__magic_name__ , metadata={'help': 'Whether to log verbose messages or not.'} , ) _snake_case : Optional[float] = field( default=2.0 , metadata={'help': 'Maximum temperature for gumbel softmax.'} ) _snake_case : Optional[float] = field( default=0.5 , metadata={'help': 'Minimum temperature for gumbel softmax.'} ) _snake_case : Optional[float] = field( default=0.99_99_95 , metadata={'help': 'Decay of gumbel temperature during training.'} ) def a__ ( lowercase : ModelArguments, lowercase : TrainingArguments ) -> int: """simple docstring""" logging.basicConfig( format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', handlers=[logging.StreamHandler(sys.stdout )], ) _UpperCamelCase = logging.WARNING if model_args.verbose_logging: _UpperCamelCase = logging.DEBUG elif trainer_utils.is_main_process(training_args.local_rank ): _UpperCamelCase = logging.INFO logger.setLevel(lowercase ) @dataclass class __lowerCAmelCase : """simple docstring""" _snake_case : str = field( default=__magic_name__ , metadata={'help': 'The name of the dataset to use (via the datasets library).'} ) _snake_case : Optional[str] = field( default=__magic_name__ , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} ) _snake_case : Optional[str] = field( default='train' , metadata={ 'help': 'The name of the training data set split to use (via the datasets library). Defaults to \'train\'' } , ) _snake_case : Optional[str] = field( default='validation' , metadata={ 'help': ( 'The name of the validation data set split to use (via the datasets library). Defaults to \'validation\'' ) } , ) _snake_case : Optional[str] = field( default='file' , metadata={'help': 'Column in the dataset that contains speech file path. Defaults to \'file\''} , ) _snake_case : bool = field( default=__magic_name__ , metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} ) _snake_case : Optional[int] = field( default=1 , metadata={ 'help': 'The percentage of the train set used as validation set in case there\'s no validation split' } , ) _snake_case : Optional[int] = field( default=__magic_name__ , metadata={'help': 'The number of processes to use for the preprocessing.'} , ) _snake_case : Optional[float] = field( default=20.0 , metadata={'help': 'Filter audio files that are longer than `max_duration_in_seconds` seconds'} ) @dataclass class __lowerCAmelCase : """simple docstring""" _snake_case : WavaVecaForPreTraining _snake_case : WavaVecaFeatureExtractor _snake_case : Union[bool, str] = "longest" _snake_case : Optional[int] = None _snake_case : Optional[int] = None def __call__( self : List[Any] , lowerCAmelCase__ : List[Dict[str, Union[List[int], torch.Tensor]]] ) -> Dict[str, torch.Tensor]: '''simple docstring''' _UpperCamelCase = self.feature_extractor.pad( lowerCAmelCase__ , max_length=self.max_length , padding=self.padding , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors='''pt''' , ) _UpperCamelCase = self.model._get_feat_extract_output_lengths(batch['''input_values'''].shape[-1] ) _UpperCamelCase = batch['''input_values'''].shape[0] # make sure that no loss is computed on padded inputs if batch["attention_mask"] is not None: # compute real output lengths according to convolution formula _UpperCamelCase = self.model._get_feat_extract_output_lengths(batch['''attention_mask'''].sum(-1 ) ).to( torch.long ) _UpperCamelCase = torch.zeros( (batch_size, mask_indices_seq_length) , dtype=torch.long , device=batch['''input_values'''].device ) # these two operations makes sure that all values # before the output lengths indices are attended to _UpperCamelCase = 1 _UpperCamelCase = attention_mask.flip([-1] ).cumsum(-1 ).flip([-1] ).bool() # sample randomly masked indices _UpperCamelCase = _compute_mask_indices( (batch_size, mask_indices_seq_length) , self.model.config.mask_time_prob , self.model.config.mask_time_length , attention_mask=lowerCAmelCase__ , min_masks=2 , ) return batch class __lowerCAmelCase ( __magic_name__ ): """simple docstring""" def __init__( self : str , *lowerCAmelCase__ : Tuple , lowerCAmelCase__ : List[str]=1 , lowerCAmelCase__ : List[str]=0 , lowerCAmelCase__ : str=1.0 , **lowerCAmelCase__ : List[Any] ) -> Tuple: '''simple docstring''' super().__init__(*lowerCAmelCase__ , **lowerCAmelCase__ ) _UpperCamelCase = 0 _UpperCamelCase = max_gumbel_temp _UpperCamelCase = min_gumbel_temp _UpperCamelCase = gumbel_temp_decay def snake_case__ ( self : Optional[int] , lowerCAmelCase__ : nn.Module , lowerCAmelCase__ : Dict[str, Union[torch.Tensor, Any]] ) -> torch.Tensor: '''simple docstring''' model.train() _UpperCamelCase = self._prepare_inputs(lowerCAmelCase__ ) if self.use_amp: with autocast(): _UpperCamelCase = self.compute_loss(lowerCAmelCase__ , lowerCAmelCase__ ) else: _UpperCamelCase = self.compute_loss(lowerCAmelCase__ , lowerCAmelCase__ ) if self.args.n_gpu > 1 or self.deepspeed: if model.module.config.ctc_loss_reduction == "mean": _UpperCamelCase = loss.mean() elif model.module.config.ctc_loss_reduction == "sum": _UpperCamelCase = loss.sum() / (inputs['''mask_time_indices''']).sum() else: raise ValueError(f"""{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']""" ) if self.args.gradient_accumulation_steps > 1: _UpperCamelCase = loss / self.args.gradient_accumulation_steps if self.use_amp: self.scaler.scale(lowerCAmelCase__ ).backward() elif self.use_apex: with amp.scale_loss(lowerCAmelCase__ , self.optimizer ) as scaled_loss: scaled_loss.backward() elif self.deepspeed: self.deepspeed.backward(lowerCAmelCase__ ) else: loss.backward() self.num_update_step += 1 # make sure gumbel softmax temperature is decayed if self.args.n_gpu > 1 or self.deepspeed: model.module.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) else: model.set_gumbel_temperature( max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step , self.min_gumbel_temp ) ) return loss.detach() def a__ ( ) -> List[Any]: """simple docstring""" _UpperCamelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) _UpperCamelCase , _UpperCamelCase , _UpperCamelCase = parser.parse_args_into_dataclasses() configure_logger(lowercase, lowercase ) # Downloading and loading a dataset from the hub. _UpperCamelCase = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir ) if "validation" not in datasets.keys(): # make sure only "validation" and "train" keys remain" _UpperCamelCase = DatasetDict() _UpperCamelCase = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=F"""{data_args.train_split_name}[:{data_args.validation_split_percentage}%]""", cache_dir=model_args.cache_dir, ) _UpperCamelCase = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=F"""{data_args.train_split_name}[{data_args.validation_split_percentage}%:]""", cache_dir=model_args.cache_dir, ) else: # make sure only "validation" and "train" keys remain" _UpperCamelCase = DatasetDict() _UpperCamelCase = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split='''validation''', cache_dir=model_args.cache_dir, ) _UpperCamelCase = load_dataset( data_args.dataset_name, data_args.dataset_config_name, split=F"""{data_args.train_split_name}""", cache_dir=model_args.cache_dir, ) # only normalized-inputs-training is supported _UpperCamelCase = WavaVecaFeatureExtractor.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=lowercase ) def prepare_dataset(lowercase : Optional[Any] ): # check that all files have the correct sampling rate _UpperCamelCase , _UpperCamelCase = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate ) return batch # load audio files into numpy arrays _UpperCamelCase = datasets.map( lowercase, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets['''train'''].column_names ) # filter audio files that are too long _UpperCamelCase = vectorized_datasets.filter( lambda lowercase : len(data['''speech'''] ) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate ) ) def normalize(lowercase : int ): return feature_extractor(batch['''speech'''], sampling_rate=feature_extractor.sampling_rate ) # normalize and transform to `BatchFeatures` _UpperCamelCase = vectorized_datasets.map( lowercase, batched=lowercase, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, remove_columns=vectorized_datasets['''train'''].column_names, ) # pretraining is only supported for "newer" stable layer norm architecture # apply_spec_augment has to be True, mask_feature_prob has to be 0.0 _UpperCamelCase = WavaVecaConfig.from_pretrained( model_args.model_name_or_path, cache_dir=model_args.cache_dir, gradient_checkpointing=training_args.gradient_checkpointing, ) if not config.do_stable_layer_norm or config.feat_extract_norm != "layer": raise ValueError( '''PreTraining is only supported for ``config.do_stable_layer_norm=True`` and''' ''' ``config.feat_extract_norm=\'layer\'''' ) _UpperCamelCase = WavaVecaForPreTraining(lowercase ) _UpperCamelCase = DataCollatorForWavaVecaPretraining(model=lowercase, feature_extractor=lowercase ) _UpperCamelCase = WavaVecaPreTrainer( model=lowercase, data_collator=lowercase, args=lowercase, train_dataset=vectorized_datasets['''train'''], eval_dataset=vectorized_datasets['''validation'''], tokenizer=lowercase, max_gumbel_temp=model_args.max_gumbel_temperature, min_gumbel_temp=model_args.min_gumbel_temperature, gumbel_temp_decay=model_args.gumbel_temperature_decay, ) trainer.train() if __name__ == "__main__": main()
98
import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model") SCREAMING_SNAKE_CASE__ : Optional[int] = get_tests_dir("fixtures/test_sentencepiece_bpe.model") SCREAMING_SNAKE_CASE__ : Any = "pt" if is_torch_available() else "tf" @require_sentencepiece @require_tokenizers class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = CamembertTokenizer lowercase_ = CamembertTokenizerFast lowercase_ = True lowercase_ = True def __lowercase( self : Tuple )-> str: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE__ : Dict = CamembertTokenizer(a_ ) tokenizer.save_pretrained(self.tmpdirname ) def __lowercase( self : Any )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = '<pad>' SCREAMING_SNAKE_CASE__ : int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ ) def __lowercase( self : Optional[Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>NOTUSED' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(a_ ) , 1004 ) def __lowercase( self : Union[str, Any] )-> Optional[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1005 ) def __lowercase( self : List[Any] )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = CamembertTokenizer(a_ ) tokenizer.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : int = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : str = 'I was born in 92000, and this is falsé.' SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = rust_tokenizer.encode(a_ ) self.assertListEqual(a_ , a_ ) SCREAMING_SNAKE_CASE__ : str = tokenizer.encode(a_ , add_special_tokens=a_ ) SCREAMING_SNAKE_CASE__ : List[str] = rust_tokenizer.encode(a_ , add_special_tokens=a_ ) self.assertListEqual(a_ , a_ ) # <unk> tokens are not the same for `rust` than for `slow`. # Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.convert_ids_to_tokens(a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = rust_tokenizer.tokenize(a_ ) self.assertListEqual(a_ , a_ ) def __lowercase( self : Union[str, Any] )-> str: """simple docstring""" if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE__ : Tuple = 'I was born in 92000, and this is falsé.' SCREAMING_SNAKE_CASE__ : str = tokenizer.tokenize(a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = rust_tokenizer.tokenize(a_ ) self.assertListEqual(a_ , a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.encode(a_ , add_special_tokens=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = rust_tokenizer.encode(a_ , add_special_tokens=a_ ) self.assertListEqual(a_ , a_ ) SCREAMING_SNAKE_CASE__ : int = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.encode(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = rust_tokenizer.encode(a_ ) self.assertListEqual(a_ , a_ ) @slow def __lowercase( self : List[str] )-> Dict: """simple docstring""" # fmt: off SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. SCREAMING_SNAKE_CASE__ : str = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=a_ , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=a_ , )
85
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE = { 'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE = [ 'LILT_PRETRAINED_MODEL_ARCHIVE_LIST', 'LiltForQuestionAnswering', 'LiltForSequenceClassification', 'LiltForTokenClassification', 'LiltModel', 'LiltPreTrainedModel', ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
99
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable SCREAMING_SNAKE_CASE__ : Any = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[str] = ["DPTFeatureExtractor"] SCREAMING_SNAKE_CASE__ : Tuple = ["DPTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[Any] = [ "DPT_PRETRAINED_MODEL_ARCHIVE_LIST", "DPTForDepthEstimation", "DPTForSemanticSegmentation", "DPTModel", "DPTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
0
def __snake_case ( lowerCAmelCase_ = 1_0_0 ) -> int: SCREAMING_SNAKE_CASE__ = 0 SCREAMING_SNAKE_CASE__ = 0 for i in range(1 , n + 1 ): sum_of_squares += i**2 sum_of_ints += i return sum_of_ints**2 - sum_of_squares if __name__ == "__main__": print(F'{solution() = }')
100
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__) class snake_case ( UpperCamelCase_ ): lowercase_ = ['pixel_values'] def __init__( self : List[Any] , a_ : bool = True , a_ : Union[int, float] = 1 / 255 , a_ : bool = True , a_ : int = 8 , **a_ : Union[str, Any] , )-> None: """simple docstring""" super().__init__(**a_ ) SCREAMING_SNAKE_CASE__ : List[str] = do_rescale SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_factor SCREAMING_SNAKE_CASE__ : Dict = do_pad SCREAMING_SNAKE_CASE__ : Any = pad_size def __lowercase( self : str , a_ : np.ndarray , a_ : float , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : str )-> np.ndarray: """simple docstring""" return rescale(a_ , scale=a_ , data_format=a_ , **a_ ) def __lowercase( self : Any , a_ : np.ndarray , a_ : int , a_ : Optional[Union[str, ChannelDimension]] = None )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = get_image_size(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = (old_height // size + 1) * size - old_height SCREAMING_SNAKE_CASE__ : List[Any] = (old_width // size + 1) * size - old_width return pad(a_ , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=a_ ) def __lowercase( self : Tuple , a_ : ImageInput , a_ : Optional[bool] = None , a_ : Optional[float] = None , a_ : Optional[bool] = None , a_ : Optional[int] = None , a_ : Optional[Union[str, TensorType]] = None , a_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a_ : Dict , )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE__ : List[str] = do_pad if do_pad is not None else self.do_pad SCREAMING_SNAKE_CASE__ : List[str] = pad_size if pad_size is not None else self.pad_size SCREAMING_SNAKE_CASE__ : Tuple = make_list_of_images(a_ ) if not valid_images(a_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE__ : List[str] = [to_numpy_array(a_ ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.rescale(image=a_ , scale=a_ ) for image in images] if do_pad: SCREAMING_SNAKE_CASE__ : str = [self.pad(a_ , size=a_ ) for image in images] SCREAMING_SNAKE_CASE__ : List[str] = [to_channel_dimension_format(a_ , a_ ) for image in images] SCREAMING_SNAKE_CASE__ : Tuple = {'pixel_values': images} return BatchFeature(data=a_ , tensor_type=a_ )
85
0
import json import os import shutil import tempfile import unittest from transformers import BatchEncoding, CanineTokenizer from transformers.testing_utils import require_tokenizers, require_torch from transformers.tokenization_utils import AddedToken from transformers.utils import cached_property from ...test_tokenization_common import TokenizerTesterMixin class __lowercase (__SCREAMING_SNAKE_CASE , unittest.TestCase ): """simple docstring""" _UpperCAmelCase = CanineTokenizer _UpperCAmelCase = False def UpperCamelCase__ ( self ): """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE_ : Union[str, Any] = CanineTokenizer() tokenizer.save_pretrained(self.tmpdirname ) @cached_property def UpperCamelCase__ ( self ): """simple docstring""" return CanineTokenizer.from_pretrained('google/canine-s' ) def UpperCamelCase__ ( self , **lowerCAmelCase__ ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = self.tokenizer_class.from_pretrained(self.tmpdirname , **lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Tuple = 1_0_2_4 return tokenizer @require_torch def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = self.canine_tokenizer SCREAMING_SNAKE_CASE_ : List[str] = ['Life is like a box of chocolates.', 'You never know what you\'re gonna get.'] # fmt: off SCREAMING_SNAKE_CASE_ : Tuple = [5_7_3_4_4, 7_6, 1_0_5, 1_0_2, 1_0_1, 3_2, 1_0_5, 1_1_5, 3_2, 1_0_8, 1_0_5, 1_0_7, 1_0_1, 3_2, 9_7, 3_2, 9_8, 1_1_1, 1_2_0, 3_2, 1_1_1, 1_0_2, 3_2, 9_9, 1_0_4, 1_1_1, 9_9, 1_1_1, 1_0_8, 9_7, 1_1_6, 1_0_1, 1_1_5, 4_6, 5_7_3_4_5, 0, 0, 0, 0] # fmt: on SCREAMING_SNAKE_CASE_ : Dict = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors='pt' ) self.assertIsInstance(lowerCAmelCase__ , lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Optional[int] = list(batch.input_ids.numpy()[0] ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertEqual((2, 3_9) , batch.input_ids.shape ) self.assertEqual((2, 3_9) , batch.attention_mask.shape ) @require_torch def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = self.canine_tokenizer SCREAMING_SNAKE_CASE_ : Dict = ['Once there was a man.', 'He wrote a test in HuggingFace Tranformers.'] SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer(lowerCAmelCase__ , padding=lowerCAmelCase__ , return_tensors='pt' ) # check if input_ids, attention_mask and token_type_ids are returned self.assertIn('input_ids' , lowerCAmelCase__ ) self.assertIn('attention_mask' , lowerCAmelCase__ ) self.assertIn('token_type_ids' , lowerCAmelCase__ ) @require_torch def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = self.canine_tokenizer SCREAMING_SNAKE_CASE_ : List[Any] = [ 'What\'s the weater?', 'It\'s about 25 degrees.', ] SCREAMING_SNAKE_CASE_ : int = tokenizer( text_target=lowerCAmelCase__ , max_length=3_2 , padding='max_length' , truncation=lowerCAmelCase__ , return_tensors='pt' ) self.assertEqual(3_2 , targets['input_ids'].shape[1] ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): self.assertNotEqual(tokenizer.model_max_length , 4_2 ) # Now let's start the test SCREAMING_SNAKE_CASE_ : List[str] = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc SCREAMING_SNAKE_CASE_ : Union[str, Any] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE_ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running' SCREAMING_SNAKE_CASE_ : List[str] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) tokenizer.save_pretrained(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Any = tokenizer.__class__.from_pretrained(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : str = after_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) shutil.rmtree(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Dict = self.get_tokenizers(model_max_length=4_2 ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # Isolate this from the other tests because we save additional tokens/etc SCREAMING_SNAKE_CASE_ : List[Any] = tempfile.mkdtemp() SCREAMING_SNAKE_CASE_ : List[str] = ' He is very happy, UNwant\u00E9d,running' SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.additional_special_tokens # We can add a new special token for Canine as follows: SCREAMING_SNAKE_CASE_ : Optional[Any] = chr(0XE_0_0_7 ) additional_special_tokens.append(lowerCAmelCase__ ) tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} ) SCREAMING_SNAKE_CASE_ : Any = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) tokenizer.save_pretrained(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.__class__.from_pretrained(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Optional[int] = after_tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ ) self.assertIn(lowerCAmelCase__ , after_tokenizer.additional_special_tokens ) self.assertEqual(after_tokenizer.model_max_length , 4_2 ) SCREAMING_SNAKE_CASE_ : int = tokenizer.__class__.from_pretrained(lowerCAmelCase__ , model_max_length=4_3 ) self.assertEqual(tokenizer.model_max_length , 4_3 ) shutil.rmtree(lowerCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizers(do_lower_case=lowerCAmelCase__ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = self.get_clean_sequence(lowerCAmelCase__ ) # a special token for Canine can be defined as follows: SCREAMING_SNAKE_CASE_ : Dict = 0XE_0_0_5 SCREAMING_SNAKE_CASE_ : Tuple = chr(lowerCAmelCase__ ) tokenizer.add_special_tokens({'cls_token': special_token} ) SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(len(lowerCAmelCase__ ) , 1 ) SCREAMING_SNAKE_CASE_ : Tuple = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Any = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : str = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) self.assertEqual(lowerCAmelCase__ , input_encoded + special_token_id ) SCREAMING_SNAKE_CASE_ : Any = tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ ) self.assertTrue(special_token not in decoded ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): SCREAMING_SNAKE_CASE_ : List[Any] = chr(0XE_0_0_5 ) SCREAMING_SNAKE_CASE_ : Optional[int] = chr(0XE_0_0_6 ) # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py) tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=lowerCAmelCase__ ) # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`, # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py) tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]} ) SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer.tokenize(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer.tokenize(lowerCAmelCase__ ) self.assertEqual(len(lowerCAmelCase__ ) , 1 ) self.assertEqual(len(lowerCAmelCase__ ) , 1 ) self.assertEqual(token_a[0] , lowerCAmelCase__ ) self.assertEqual(token_a[0] , lowerCAmelCase__ ) @require_tokenizers def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = self.get_tokenizers(do_lower_case=lowerCAmelCase__ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): # a special token for Canine can be defined as follows: SCREAMING_SNAKE_CASE_ : Optional[Any] = 0XE_0_0_6 SCREAMING_SNAKE_CASE_ : Tuple = chr(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Optional[int] = AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ ) tokenizer.add_special_tokens({'additional_special_tokens': [new_token]} ) with tempfile.TemporaryDirectory() as tmp_dir_name: tokenizer.save_pretrained(lowerCAmelCase__ ) tokenizer.from_pretrained(lowerCAmelCase__ ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : int = [] if self.test_slow_tokenizer: tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) ) if self.test_rust_tokenizer: tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) ) for tokenizer_class, tokenizer_utils in tokenizer_list: with tempfile.TemporaryDirectory() as tmp_dir: tokenizer_utils.save_pretrained(lowerCAmelCase__ ) with open(os.path.join(lowerCAmelCase__ , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file: SCREAMING_SNAKE_CASE_ : Any = json.load(lowerCAmelCase__ ) with open(os.path.join(lowerCAmelCase__ , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file: SCREAMING_SNAKE_CASE_ : List[str] = json.load(lowerCAmelCase__ ) # a special token for Canine can be defined as follows: SCREAMING_SNAKE_CASE_ : int = 0XE_0_0_6 SCREAMING_SNAKE_CASE_ : List[str] = chr(lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Any = [new_token_a] SCREAMING_SNAKE_CASE_ : Union[str, Any] = [new_token_a] with open(os.path.join(lowerCAmelCase__ , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(lowerCAmelCase__ , lowerCAmelCase__ ) with open(os.path.join(lowerCAmelCase__ , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile: json.dump(lowerCAmelCase__ , lowerCAmelCase__ ) # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and # "special_tokens_map.json" files SCREAMING_SNAKE_CASE_ : Optional[int] = tokenizer_class.from_pretrained(lowerCAmelCase__ , extra_ids=0 ) self.assertIn(lowerCAmelCase__ , tokenizer_without_change_in_init.additional_special_tokens ) # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens( tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , ) SCREAMING_SNAKE_CASE_ : List[str] = 0XE_0_0_7 SCREAMING_SNAKE_CASE_ : Union[str, Any] = chr(lowerCAmelCase__ ) # Now we test that we can change the value of additional_special_tokens in the from_pretrained SCREAMING_SNAKE_CASE_ : Dict = [AddedToken(lowerCAmelCase__ , lstrip=lowerCAmelCase__ )] SCREAMING_SNAKE_CASE_ : List[Any] = tokenizer_class.from_pretrained( lowerCAmelCase__ , additional_special_tokens=lowerCAmelCase__ , extra_ids=0 ) self.assertIn(lowerCAmelCase__ , tokenizer.additional_special_tokens ) # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab self.assertEqual( [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) ) @require_tokenizers def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : List[str] = self.get_tokenizers(do_lower_case=lowerCAmelCase__ ) for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): SCREAMING_SNAKE_CASE_ : int = 'hello world' if self.space_between_special_tokens: SCREAMING_SNAKE_CASE_ : List[str] = '[CLS] hello world [SEP]' else: SCREAMING_SNAKE_CASE_ : Optional[Any] = input SCREAMING_SNAKE_CASE_ : Optional[Any] = tokenizer.encode(lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ ) SCREAMING_SNAKE_CASE_ : Dict = tokenizer.decode(lowerCAmelCase__ , spaces_between_special_tokens=self.space_between_special_tokens ) self.assertIn(lowerCAmelCase__ , [output, output.lower()] ) def UpperCamelCase__ ( self ): """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = self.get_tokenizers() for tokenizer in tokenizers: with self.subTest(F'''{tokenizer.__class__.__name__}''' ): SCREAMING_SNAKE_CASE_ : Union[str, Any] = [ 'bos_token', 'eos_token', 'unk_token', 'sep_token', 'pad_token', 'cls_token', 'mask_token', ] SCREAMING_SNAKE_CASE_ : Union[str, Any] = 'a' SCREAMING_SNAKE_CASE_ : Optional[Any] = ord(lowerCAmelCase__ ) for attr in attributes_list: setattr(lowerCAmelCase__ , attr + '_id' , lowerCAmelCase__ ) self.assertEqual(getattr(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ ) self.assertEqual(getattr(lowerCAmelCase__ , attr + '_id' ) , lowerCAmelCase__ ) setattr(lowerCAmelCase__ , attr + '_id' , lowerCAmelCase__ ) self.assertEqual(getattr(lowerCAmelCase__ , lowerCAmelCase__ ) , lowerCAmelCase__ ) self.assertEqual(getattr(lowerCAmelCase__ , attr + '_id' ) , lowerCAmelCase__ ) setattr(lowerCAmelCase__ , 'additional_special_tokens_ids' , [] ) self.assertListEqual(getattr(lowerCAmelCase__ , 'additional_special_tokens' ) , [] ) self.assertListEqual(getattr(lowerCAmelCase__ , 'additional_special_tokens_ids' ) , [] ) SCREAMING_SNAKE_CASE_ : str = 0XE_0_0_6 SCREAMING_SNAKE_CASE_ : Union[str, Any] = chr(lowerCAmelCase__ ) setattr(lowerCAmelCase__ , 'additional_special_tokens_ids' , [additional_special_token_id] ) self.assertListEqual(getattr(lowerCAmelCase__ , 'additional_special_tokens' ) , [additional_special_token] ) self.assertListEqual(getattr(lowerCAmelCase__ , 'additional_special_tokens_ids' ) , [additional_special_token_id] ) def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" pass def UpperCamelCase__ ( self ): """simple docstring""" pass
101
from pathlib import Path import numpy as np from PIL import Image def _a ( lowercase__ : np.ndarray ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 0.1140 * b def _a ( lowercase__ : np.ndarray ): '''simple docstring''' return (gray > 1_27) & (gray <= 2_55) def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = np.zeros_like(lowercase__ ) SCREAMING_SNAKE_CASE__ : str = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image SCREAMING_SNAKE_CASE__ : Optional[Any] = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): SCREAMING_SNAKE_CASE__ : List[Any] = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() SCREAMING_SNAKE_CASE__ : List[str] = int(summation > 0 ) return output if __name__ == "__main__": # read original image SCREAMING_SNAKE_CASE__ : int = Path(__file__).resolve().parent / "image_data" / "lena.jpg" SCREAMING_SNAKE_CASE__ : int = np.array(Image.open(lena_path)) # kernel to be applied SCREAMING_SNAKE_CASE__ : str = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) SCREAMING_SNAKE_CASE__ : Optional[int] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image SCREAMING_SNAKE_CASE__ : Optional[int] = Image.fromarray(output).convert("RGB") pil_img.save("result_dilation.png")
85
0
"""simple docstring""" import logging import os import sys from dataclasses import dataclass, field from typing import Optional from seqaseq_trainer import SeqaSeqTrainer from seqaseq_training_args import SeqaSeqTrainingArguments import transformers from transformers import ( AutoConfig, AutoModelForSeqaSeqLM, AutoTokenizer, HfArgumentParser, MBartTokenizer, MBartTokenizerFast, set_seed, ) from transformers.trainer_utils import EvaluationStrategy, is_main_process from transformers.training_args import ParallelMode from utils import ( SeqaSeqDataCollator, SeqaSeqDataset, assert_all_frozen, build_compute_metrics_fn, check_output_dir, freeze_embeds, freeze_params, lmap, save_json, use_task_specific_params, write_txt_file, ) __magic_name__ : Tuple = logging.getLogger(__name__) @dataclass class lowercase__ : """simple docstring""" __lowerCAmelCase : str = field( metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} ) __lowerCAmelCase : Optional[str] = field( default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} ) __lowerCAmelCase : Optional[str] = field( default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} ) __lowerCAmelCase : Optional[str] = field( default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , ) __lowerCAmelCase : bool = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether tp freeze the encoder."""} ) __lowerCAmelCase : bool = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Whether to freeze the embeddings."""} ) @dataclass class lowercase__ : """simple docstring""" __lowerCAmelCase : str = field( metadata={"""help""": """The input data dir. Should contain the .tsv files (or other data files) for the task."""} ) __lowerCAmelCase : Optional[str] = field( default="""summarization""" , metadata={"""help""": """Task name, summarization (or summarization_{dataset} for pegasus) or translation"""} , ) __lowerCAmelCase : Optional[int] = field( default=1024 , metadata={ """help""": ( """The maximum total input sequence length after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) __lowerCAmelCase : Optional[int] = field( default=128 , metadata={ """help""": ( """The maximum total sequence length for target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) __lowerCAmelCase : Optional[int] = field( default=142 , metadata={ """help""": ( """The maximum total sequence length for validation target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded. """ """This argument is also used to override the ``max_length`` param of ``model.generate``, which is used """ """during ``evaluate`` and ``predict``.""" ) } , ) __lowerCAmelCase : Optional[int] = field( default=142 , metadata={ """help""": ( """The maximum total sequence length for test target text after tokenization. Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) } , ) __lowerCAmelCase : Optional[int] = field(default=-1 , metadata={"""help""": """# training examples. -1 means use all."""} ) __lowerCAmelCase : Optional[int] = field(default=-1 , metadata={"""help""": """# validation examples. -1 means use all."""} ) __lowerCAmelCase : Optional[int] = field(default=-1 , metadata={"""help""": """# test examples. -1 means use all."""} ) __lowerCAmelCase : Optional[str] = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Source language id for translation."""} ) __lowerCAmelCase : Optional[str] = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """Target language id for translation."""} ) __lowerCAmelCase : Optional[int] = field(default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """# num_beams to use for evaluation."""} ) __lowerCAmelCase : bool = field( default=__SCREAMING_SNAKE_CASE , metadata={"""help""": """If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."""} , ) def UpperCamelCase (SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): logger.info(f"""***** {split} metrics *****""" ) for key in sorted(metrics.keys() ): logger.info(f""" {key} = {metrics[key]}""" ) save_json(SCREAMING_SNAKE_CASE , os.path.join(SCREAMING_SNAKE_CASE , f"""{split}_results.json""" ) ) def UpperCamelCase (): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. UpperCamelCase : List[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. UpperCamelCase , UpperCamelCase , UpperCamelCase : List[str] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: UpperCamelCase , UpperCamelCase , UpperCamelCase : Optional[int] = parser.parse_args_into_dataclasses() check_output_dir(SCREAMING_SNAKE_CASE ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , ) logger.warning( """Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank ): transformers.utils.logging.set_verbosity_info() logger.info("""Training/evaluation parameters %s""" , SCREAMING_SNAKE_CASE ) # Set seed set_seed(training_args.seed ) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. UpperCamelCase : List[Any] = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) UpperCamelCase : Tuple = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""") for p in extra_model_params: if getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): assert hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ), f"""({config.__class__.__name__}) doesn't have a `{p}` attribute""" setattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ) UpperCamelCase : Optional[Any] = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , ) UpperCamelCase : Any = AutoModelForSeqaSeqLM.from_pretrained( model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , ) # use task specific params use_task_specific_params(SCREAMING_SNAKE_CASE , data_args.task ) # set num_beams for evaluation if data_args.eval_beams is None: UpperCamelCase : Dict = model.config.num_beams # set decoder_start_token_id for MBart if model.config.decoder_start_token_id is None and isinstance(SCREAMING_SNAKE_CASE , (MBartTokenizer, MBartTokenizerFast) ): assert ( data_args.tgt_lang is not None and data_args.src_lang is not None ), "mBart requires --tgt_lang and --src_lang" if isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ): UpperCamelCase : List[str] = tokenizer.lang_code_to_id[data_args.tgt_lang] else: UpperCamelCase : List[Any] = tokenizer.convert_tokens_to_ids(data_args.tgt_lang ) if model_args.freeze_embeds: freeze_embeds(SCREAMING_SNAKE_CASE ) if model_args.freeze_encoder: freeze_params(model.get_encoder() ) assert_all_frozen(model.get_encoder() ) UpperCamelCase : Optional[Any] = SeqaSeqDataset # Get datasets UpperCamelCase : List[Any] = ( dataset_class( SCREAMING_SNAKE_CASE , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_train else None ) UpperCamelCase : Optional[int] = ( dataset_class( SCREAMING_SNAKE_CASE , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO else None ) UpperCamelCase : Optional[Any] = ( dataset_class( SCREAMING_SNAKE_CASE , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , ) if training_args.do_predict else None ) # Initialize our Trainer UpperCamelCase : Tuple = ( build_compute_metrics_fn(data_args.task , SCREAMING_SNAKE_CASE ) if training_args.predict_with_generate else None ) UpperCamelCase : Optional[int] = SeqaSeqTrainer( model=SCREAMING_SNAKE_CASE , args=SCREAMING_SNAKE_CASE , data_args=SCREAMING_SNAKE_CASE , train_dataset=SCREAMING_SNAKE_CASE , eval_dataset=SCREAMING_SNAKE_CASE , data_collator=SeqaSeqDataCollator( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=SCREAMING_SNAKE_CASE , tokenizer=SCREAMING_SNAKE_CASE , ) UpperCamelCase : Optional[Any] = {} # Training if training_args.do_train: logger.info("""*** Train ***""" ) UpperCamelCase : Optional[Any] = trainer.train( model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None ) UpperCamelCase : Dict = train_result.metrics UpperCamelCase : Optional[int] = data_args.n_train trainer.save_model() # this also saves the tokenizer if trainer.is_world_process_zero(): handle_metrics("""train""" , SCREAMING_SNAKE_CASE , training_args.output_dir ) all_metrics.update(SCREAMING_SNAKE_CASE ) # Need to save the state, since Trainer.save_model saves only the tokenizer with the model trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) ) # For convenience, we also re-save the tokenizer to the same directory, # so that you can share your model easily on huggingface.co/models =) tokenizer.save_pretrained(training_args.output_dir ) # Evaluation if training_args.do_eval: logger.info("""*** Evaluate ***""" ) UpperCamelCase : List[str] = trainer.evaluate(metric_key_prefix="""val""" ) UpperCamelCase : int = data_args.n_val UpperCamelCase : Any = round(metrics["""val_loss"""] , 4 ) if trainer.is_world_process_zero(): handle_metrics("""val""" , SCREAMING_SNAKE_CASE , training_args.output_dir ) all_metrics.update(SCREAMING_SNAKE_CASE ) if training_args.do_predict: logger.info("""*** Predict ***""" ) UpperCamelCase : List[Any] = trainer.predict(test_dataset=SCREAMING_SNAKE_CASE , metric_key_prefix="""test""" ) UpperCamelCase : Union[str, Any] = test_output.metrics UpperCamelCase : List[str] = data_args.n_test if trainer.is_world_process_zero(): UpperCamelCase : Dict = round(metrics["""test_loss"""] , 4 ) handle_metrics("""test""" , SCREAMING_SNAKE_CASE , training_args.output_dir ) all_metrics.update(SCREAMING_SNAKE_CASE ) if training_args.predict_with_generate: UpperCamelCase : str = tokenizer.batch_decode( test_output.predictions , skip_special_tokens=SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE ) UpperCamelCase : Any = lmap(str.strip , SCREAMING_SNAKE_CASE ) write_txt_file(SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , """test_generations.txt""" ) ) if trainer.is_world_process_zero(): save_json(SCREAMING_SNAKE_CASE , os.path.join(training_args.output_dir , """all_results.json""" ) ) return all_metrics def UpperCamelCase (SCREAMING_SNAKE_CASE ): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
102
def _a ( lowercase__ : int = 60_08_51_47_51_43 ): '''simple docstring''' try: SCREAMING_SNAKE_CASE__ : Dict = int(lowercase__ ) except (TypeError, ValueError): raise TypeError('Parameter n must be int or castable to int.' ) if n <= 0: raise ValueError('Parameter n must be greater than or equal to one.' ) SCREAMING_SNAKE_CASE__ : int = 2 SCREAMING_SNAKE_CASE__ : int = 0 if n == 2: return 2 while n > 2: while n % i != 0: i += 1 SCREAMING_SNAKE_CASE__ : str = i while n % i == 0: SCREAMING_SNAKE_CASE__ : List[Any] = n // i i += 1 return int(lowercase__ ) if __name__ == "__main__": print(F"""{solution() = }""")
85
0
"""simple docstring""" def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> float: return round(float(moles / volume ) * nfactor ) def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> float: return round(float((moles * 0.0821 * temperature) / (volume) ) ) def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> float: return round(float((moles * 0.0821 * temperature) / (pressure) ) ) def snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> float: return round(float((pressure * volume) / (0.0821 * moles) ) ) if __name__ == "__main__": import doctest doctest.testmod()
103
def _a ( lowercase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = int(lowercase__ ) if n_element < 1: SCREAMING_SNAKE_CASE__ : Tuple = ValueError('a should be a positive number' ) raise my_error SCREAMING_SNAKE_CASE__ : Any = [1] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = (0, 0, 0) SCREAMING_SNAKE_CASE__ : Any = 1 while index < n_element: while hamming_list[i] * 2 <= hamming_list[-1]: i += 1 while hamming_list[j] * 3 <= hamming_list[-1]: j += 1 while hamming_list[k] * 5 <= hamming_list[-1]: k += 1 hamming_list.append( min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) ) index += 1 return hamming_list if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Any = input("Enter the last number (nth term) of the Hamming Number Series: ") print("Formula of Hamming Number Series => 2^i * 3^j * 5^k") SCREAMING_SNAKE_CASE__ : int = hamming(int(n)) print("-----------------------------------------------------") print(F"""The list with nth numbers is: {hamming_numbers}""") print("-----------------------------------------------------")
85
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_videomae import VideoMAEImageProcessor UpperCamelCase = logging.get_logger(__name__) class UpperCamelCase__ ( _lowerCAmelCase ): """simple docstring""" def __init__( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> None: warnings.warn( "The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use VideoMAEImageProcessor instead." , SCREAMING_SNAKE_CASE__ , ) super().__init__(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
104
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ : Union[str, Any] = { "configuration_nllb_moe": [ "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP", "NllbMoeConfig", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : str = [ "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST", "NllbMoeForConditionalGeneration", "NllbMoeModel", "NllbMoePreTrainedModel", "NllbMoeTop2Router", "NllbMoeSparseMLP", ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
0
from __future__ import annotations # This is the precision for this function which can be altered. # It is recommended for users to keep this number greater than or equal to 10. UpperCamelCase__ : Tuple = 10 def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : list[int] , lowerCamelCase_ : int ) -> int: """simple docstring""" for i in range(lowerCamelCase_ , lowerCamelCase_ ): if array[i] == target: return i return -1 def __UpperCAmelCase ( lowerCamelCase_ : list[int] , lowerCamelCase_ : int ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE_ : Any = 0 SCREAMING_SNAKE_CASE_ : Dict = len(lowerCamelCase_ ) while left <= right: if right - left < precision: return lin_search(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : Any = (left + right) // 3 + 1 SCREAMING_SNAKE_CASE_ : int = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: SCREAMING_SNAKE_CASE_ : Optional[Any] = one_third - 1 elif array[two_third] < target: SCREAMING_SNAKE_CASE_ : Tuple = two_third + 1 else: SCREAMING_SNAKE_CASE_ : Optional[Any] = one_third + 1 SCREAMING_SNAKE_CASE_ : Dict = two_third - 1 else: return -1 def __UpperCAmelCase ( lowerCamelCase_ : int , lowerCamelCase_ : int , lowerCamelCase_ : list[int] , lowerCamelCase_ : int ) -> int: """simple docstring""" if left < right: if right - left < precision: return lin_search(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) SCREAMING_SNAKE_CASE_ : Dict = (left + right) // 3 + 1 SCREAMING_SNAKE_CASE_ : Tuple = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: return rec_ternary_search(lowerCamelCase_ , one_third - 1 , lowerCamelCase_ , lowerCamelCase_ ) elif array[two_third] < target: return rec_ternary_search(two_third + 1 , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ) else: return rec_ternary_search(one_third + 1 , two_third - 1 , lowerCamelCase_ , lowerCamelCase_ ) else: return -1 if __name__ == "__main__": import doctest doctest.testmod() UpperCamelCase__ : List[Any] = input('''Enter numbers separated by comma:\n''').strip() UpperCamelCase__ : Union[str, Any] = [int(item.strip()) for item in user_input.split(''',''')] assert collection == sorted(collection), F"List must be ordered.\n{collection}." UpperCamelCase__ : Any = int(input('''Enter the number to be found in the list:\n''').strip()) UpperCamelCase__ : Optional[int] = ite_ternary_search(collection, target) UpperCamelCase__ : Tuple = rec_ternary_search(0, len(collection) - 1, collection, target) if resulta != -1: print(F"""Iterative search: {target} found at positions: {resulta}""") print(F"""Recursive search: {target} found at positions: {resulta}""") else: print('''Not found''')
105
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) SCREAMING_SNAKE_CASE__ : List[str] = { "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[int] = [ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys SCREAMING_SNAKE_CASE__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
0
from __future__ import annotations def lowerCamelCase_ ( lowerCAmelCase__ : list[int] ) -> list[int]: '''simple docstring''' if len(lowerCAmelCase__ ) == 0: return array A , A = min(lowerCAmelCase__ ), max(lowerCAmelCase__ ) # Compute the variables A = _max - _min + 1 A , A = [0] * holes_range, [0] * holes_range # Make the sorting. for i in array: A = i - _min A = i holes_repeat[index] += 1 # Makes the array back by replacing the numbers. A = 0 for i in range(lowerCAmelCase__ ): while holes_repeat[i] > 0: A = holes[i] index += 1 holes_repeat[i] -= 1 # Returns the sorted array. return array if __name__ == "__main__": import doctest doctest.testmod() __snake_case :Dict =input('Enter numbers separated by comma:\n') __snake_case :Tuple =[int(x) for x in user_input.split(',')] print(pigeon_sort(unsorted))
106
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs SCREAMING_SNAKE_CASE__ : int = imread(r"digital_image_processing/image_data/lena_small.jpg") SCREAMING_SNAKE_CASE__ : List[Any] = cvtColor(img, COLOR_BGR2GRAY) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = cn.convert_to_negative(lowercase__ ) # assert negative_img array for at least one True assert negative_img.any() def _a ( ): '''simple docstring''' with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img: # Work around assertion for response assert str(cc.change_contrast(lowercase__ , 1_10 ) ).startswith( '<PIL.Image.Image image mode=RGB size=100x100 at' ) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = imread('digital_image_processing/image_data/lena_small.jpg' , 0 ) # assert ambiguous array for all == True assert canny_img.all() SCREAMING_SNAKE_CASE__ : List[str] = canny.canny(lowercase__ ) # assert canny array for at least one True assert canny_array.any() def _a ( ): '''simple docstring''' assert gg.gaussian_filter(lowercase__ , 5 , sigma=0.9 ).all() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) SCREAMING_SNAKE_CASE__ : Tuple = conv.img_convolve(lowercase__ , lowercase__ ).astype(lowercase__ ) assert res.any() def _a ( ): '''simple docstring''' assert med.median_filter(lowercase__ , 3 ).any() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = sob.sobel_filter(lowercase__ ) assert grad.any() and theta.any() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = sp.make_sepia(lowercase__ , 20 ) assert sepia.all() def _a ( lowercase__ : str = "digital_image_processing/image_data/lena_small.jpg" ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = bs.Burkes(imread(lowercase__ , 1 ) , 1_20 ) burkes.process() assert burkes.output_img.any() def _a ( lowercase__ : str = "digital_image_processing/image_data/lena_small.jpg" , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = rs.NearestNeighbour(imread(lowercase__ , 1 ) , 4_00 , 2_00 ) nn.process() assert nn.output.any() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = 'digital_image_processing/image_data/lena.jpg' # Reading the image and converting it to grayscale. SCREAMING_SNAKE_CASE__ : Dict = imread(lowercase__ , 0 ) # Test for get_neighbors_pixel function() return not None SCREAMING_SNAKE_CASE__ : str = 0 SCREAMING_SNAKE_CASE__ : Dict = 0 SCREAMING_SNAKE_CASE__ : Any = image[x_coordinate][y_coordinate] SCREAMING_SNAKE_CASE__ : List[Any] = lbp.get_neighbors_pixel( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image SCREAMING_SNAKE_CASE__ : Optional[Any] = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): SCREAMING_SNAKE_CASE__ : str = lbp.local_binary_value(lowercase__ , lowercase__ , lowercase__ ) assert lbp_image.any()
85
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _UpperCAmelCase : str = { '''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig'''] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Tuple = ['''VisionEncoderDecoderModel'''] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : str = ['''TFVisionEncoderDecoderModel'''] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _UpperCAmelCase : Any = ['''FlaxVisionEncoderDecoderModel'''] if TYPE_CHECKING: from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel else: import sys _UpperCAmelCase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
107
import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu SCREAMING_SNAKE_CASE__ : Any = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json" with io.open(filename, "r", encoding="utf-8") as f: SCREAMING_SNAKE_CASE__ : Tuple = json.load(f) @require_torch class snake_case ( unittest.TestCase ): def __lowercase( self : List[str] , a_ : Any )-> str: """simple docstring""" return FSMTTokenizer.from_pretrained(a_ ) def __lowercase( self : int , a_ : Union[str, Any] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = FSMTForConditionalGeneration.from_pretrained(a_ ).to(a_ ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ['en-ru', 26.0], ['ru-en', 22.0], ['en-de', 22.0], ['de-en', 29.0], ] ) @slow def __lowercase( self : int , a_ : Optional[int] , a_ : str )-> List[str]: """simple docstring""" # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality SCREAMING_SNAKE_CASE__ : Any = F'''facebook/wmt19-{pair}''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_tokenizer(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_model(a_ ) SCREAMING_SNAKE_CASE__ : int = bleu_data[pair]['src'] SCREAMING_SNAKE_CASE__ : Optional[int] = bleu_data[pair]['tgt'] SCREAMING_SNAKE_CASE__ : Any = tokenizer(a_ , return_tensors='pt' , truncation=a_ , padding='longest' ).to(a_ ) SCREAMING_SNAKE_CASE__ : int = model.generate( input_ids=batch.input_ids , num_beams=8 , ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.batch_decode( a_ , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = calculate_bleu(a_ , a_ ) print(a_ ) self.assertGreaterEqual(scores['bleu'] , a_ )
85
0
import numpy as np __a: List[str] = [ ['''a''', '''b''', '''c''', '''d''', '''e'''], ['''f''', '''g''', '''h''', '''i''', '''k'''], ['''l''', '''m''', '''n''', '''o''', '''p'''], ['''q''', '''r''', '''s''', '''t''', '''u'''], ['''v''', '''w''', '''x''', '''y''', '''z'''], ] class SCREAMING_SNAKE_CASE__ : '''simple docstring''' def __init__( self : Union[str, Any] ) -> None: """simple docstring""" _UpperCAmelCase = np.array(lowerCamelCase ) def lowerCamelCase ( self : Tuple , lowerCamelCase : str ) -> np.ndarray: """simple docstring""" _UpperCAmelCase , _UpperCAmelCase = np.where(letter == self.SQUARE ) _UpperCAmelCase = np.concatenate([indexa + 1, indexa + 1] ) return indexes def lowerCamelCase ( self : str , lowerCamelCase : int , lowerCamelCase : int ) -> str: """simple docstring""" _UpperCAmelCase = self.SQUARE[indexa - 1, indexa - 1] return letter def lowerCamelCase ( self : Optional[Any] , lowerCamelCase : str ) -> str: """simple docstring""" _UpperCAmelCase = message.lower() _UpperCAmelCase = message.replace(""" """ , """""" ) _UpperCAmelCase = message.replace("""j""" , """i""" ) _UpperCAmelCase = np.empty((2, len(lowerCamelCase )) ) for letter_index in range(len(lowerCamelCase ) ): _UpperCAmelCase = self.letter_to_numbers(message[letter_index] ) _UpperCAmelCase = numbers[0] _UpperCAmelCase = numbers[1] _UpperCAmelCase = first_step.reshape(2 * len(lowerCamelCase ) ) _UpperCAmelCase = """""" for numbers_index in range(len(lowerCamelCase ) ): _UpperCAmelCase = int(second_step[numbers_index * 2] ) _UpperCAmelCase = int(second_step[(numbers_index * 2) + 1] ) _UpperCAmelCase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase ) _UpperCAmelCase = encoded_message + letter return encoded_message def lowerCamelCase ( self : Optional[int] , lowerCamelCase : str ) -> str: """simple docstring""" _UpperCAmelCase = message.lower() message.replace(""" """ , """""" ) _UpperCAmelCase = np.empty(2 * len(lowerCamelCase ) ) for letter_index in range(len(lowerCamelCase ) ): _UpperCAmelCase = self.letter_to_numbers(message[letter_index] ) _UpperCAmelCase = numbers[0] _UpperCAmelCase = numbers[1] _UpperCAmelCase = first_step.reshape((2, len(lowerCamelCase )) ) _UpperCAmelCase = """""" for numbers_index in range(len(lowerCamelCase ) ): _UpperCAmelCase = int(second_step[0, numbers_index] ) _UpperCAmelCase = int(second_step[1, numbers_index] ) _UpperCAmelCase = self.numbers_to_letter(lowerCamelCase , lowerCamelCase ) _UpperCAmelCase = decoded_message + letter return decoded_message
108
import os import pytest from attr import dataclass SCREAMING_SNAKE_CASE__ : int = "us-east-1" # defaults region @dataclass class snake_case : lowercase_ = 42 lowercase_ = 'arn:aws:iam::558105141721:role/sagemaker_execution_role' lowercase_ = { 'task_name': 'mnli', 'per_device_train_batch_size': 16, 'per_device_eval_batch_size': 16, 'do_train': True, 'do_eval': True, 'do_predict': True, 'output_dir': '/opt/ml/model', 'overwrite_output_dir': True, 'max_steps': 500, 'save_steps': 5_500, } lowercase_ = {**hyperparameters, 'max_steps': 1_000} @property def __lowercase( self : List[str] )-> str: """simple docstring""" if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def __lowercase( self : Union[str, Any] )-> str: """simple docstring""" return F'''{self.framework}-transfromers-test''' @property def __lowercase( self : int )-> str: """simple docstring""" return F'''./tests/sagemaker/scripts/{self.framework}''' @property def __lowercase( self : Tuple )-> str: """simple docstring""" if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope='class' ) def _a ( lowercase__ : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
85
0
'''simple docstring''' import math from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass # Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->UnCLIP class __a ( _snake_case ): __UpperCamelCase : torch.FloatTensor __UpperCamelCase : Optional[torch.FloatTensor] = None def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=0.9_9_9 , __UpperCAmelCase="cosine" , ) -> Union[str, Any]: '''simple docstring''' if alpha_transform_type == "cosine": def alpha_bar_fn(__UpperCAmelCase ): return math.cos((t + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2 elif alpha_transform_type == "exp": def alpha_bar_fn(__UpperCAmelCase ): return math.exp(t * -1_2.0 ) else: raise ValueError(f"""Unsupported alpha_tranform_type: {alpha_transform_type}""" ) __SCREAMING_SNAKE_CASE = [] for i in range(__UpperCAmelCase ): __SCREAMING_SNAKE_CASE = i / num_diffusion_timesteps __SCREAMING_SNAKE_CASE = (i + 1) / num_diffusion_timesteps betas.append(min(1 - alpha_bar_fn(__UpperCAmelCase ) / alpha_bar_fn(__UpperCAmelCase ) , __UpperCAmelCase ) ) return torch.tensor(__UpperCAmelCase , dtype=torch.floataa ) class __a ( _snake_case, _snake_case ): @register_to_config def __init__( self : Tuple ,lowerCamelCase : int = 1000 ,lowerCamelCase : str = "fixed_small_log" ,lowerCamelCase : bool = True ,lowerCamelCase : Optional[float] = 1.0 ,lowerCamelCase : str = "epsilon" ,lowerCamelCase : str = "squaredcos_cap_v2" ,): '''simple docstring''' if beta_schedule != "squaredcos_cap_v2": raise ValueError("""UnCLIPScheduler only supports `beta_schedule`: 'squaredcos_cap_v2'""" ) __SCREAMING_SNAKE_CASE = betas_for_alpha_bar(lowerCamelCase ) __SCREAMING_SNAKE_CASE = 1.0 - self.betas __SCREAMING_SNAKE_CASE = torch.cumprod(self.alphas ,dim=0 ) __SCREAMING_SNAKE_CASE = torch.tensor(1.0 ) # standard deviation of the initial noise distribution __SCREAMING_SNAKE_CASE = 1.0 # setable values __SCREAMING_SNAKE_CASE = None __SCREAMING_SNAKE_CASE = torch.from_numpy(np.arange(0 ,lowerCamelCase )[::-1].copy() ) __SCREAMING_SNAKE_CASE = variance_type def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : torch.FloatTensor ,lowerCamelCase : Optional[int] = None ): '''simple docstring''' return sample def UpperCAmelCase__ ( self : str ,lowerCamelCase : int ,lowerCamelCase : Union[str, torch.device] = None ): '''simple docstring''' __SCREAMING_SNAKE_CASE = num_inference_steps __SCREAMING_SNAKE_CASE = (self.config.num_train_timesteps - 1) / (self.num_inference_steps - 1) __SCREAMING_SNAKE_CASE = (np.arange(0 ,lowerCamelCase ) * step_ratio).round()[::-1].copy().astype(np.intaa ) __SCREAMING_SNAKE_CASE = torch.from_numpy(lowerCamelCase ).to(lowerCamelCase ) def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : Dict ,lowerCamelCase : Optional[Any]=None ,lowerCamelCase : List[str]=None ,lowerCamelCase : Any=None ): '''simple docstring''' if prev_timestep is None: __SCREAMING_SNAKE_CASE = t - 1 __SCREAMING_SNAKE_CASE = self.alphas_cumprod[t] __SCREAMING_SNAKE_CASE = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one __SCREAMING_SNAKE_CASE = 1 - alpha_prod_t __SCREAMING_SNAKE_CASE = 1 - alpha_prod_t_prev if prev_timestep == t - 1: __SCREAMING_SNAKE_CASE = self.betas[t] else: __SCREAMING_SNAKE_CASE = 1 - alpha_prod_t / alpha_prod_t_prev # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample __SCREAMING_SNAKE_CASE = beta_prod_t_prev / beta_prod_t * beta if variance_type is None: __SCREAMING_SNAKE_CASE = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small_log": __SCREAMING_SNAKE_CASE = torch.log(torch.clamp(lowerCamelCase ,min=1E-2_0 ) ) __SCREAMING_SNAKE_CASE = torch.exp(0.5 * variance ) elif variance_type == "learned_range": # NOTE difference with DDPM scheduler __SCREAMING_SNAKE_CASE = variance.log() __SCREAMING_SNAKE_CASE = beta.log() __SCREAMING_SNAKE_CASE = (predicted_variance + 1) / 2 __SCREAMING_SNAKE_CASE = frac * max_log + (1 - frac) * min_log return variance def UpperCAmelCase__ ( self : str ,lowerCamelCase : torch.FloatTensor ,lowerCamelCase : int ,lowerCamelCase : torch.FloatTensor ,lowerCamelCase : Optional[int] = None ,lowerCamelCase : Tuple=None ,lowerCamelCase : bool = True ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = timestep if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type == "learned_range": __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = torch.split(lowerCamelCase ,sample.shape[1] ,dim=1 ) else: __SCREAMING_SNAKE_CASE = None # 1. compute alphas, betas if prev_timestep is None: __SCREAMING_SNAKE_CASE = t - 1 __SCREAMING_SNAKE_CASE = self.alphas_cumprod[t] __SCREAMING_SNAKE_CASE = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.one __SCREAMING_SNAKE_CASE = 1 - alpha_prod_t __SCREAMING_SNAKE_CASE = 1 - alpha_prod_t_prev if prev_timestep == t - 1: __SCREAMING_SNAKE_CASE = self.betas[t] __SCREAMING_SNAKE_CASE = self.alphas[t] else: __SCREAMING_SNAKE_CASE = 1 - alpha_prod_t / alpha_prod_t_prev __SCREAMING_SNAKE_CASE = 1 - beta # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": __SCREAMING_SNAKE_CASE = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": __SCREAMING_SNAKE_CASE = model_output else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon` or `sample`""" """ for the UnCLIPScheduler.""" ) # 3. Clip "predicted x_0" if self.config.clip_sample: __SCREAMING_SNAKE_CASE = torch.clamp( lowerCamelCase ,-self.config.clip_sample_range ,self.config.clip_sample_range ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf __SCREAMING_SNAKE_CASE = (alpha_prod_t_prev ** 0.5 * beta) / beta_prod_t __SCREAMING_SNAKE_CASE = alpha ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf __SCREAMING_SNAKE_CASE = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. Add noise __SCREAMING_SNAKE_CASE = 0 if t > 0: __SCREAMING_SNAKE_CASE = randn_tensor( model_output.shape ,dtype=model_output.dtype ,generator=lowerCamelCase ,device=model_output.device ) __SCREAMING_SNAKE_CASE = self._get_variance( lowerCamelCase ,predicted_variance=lowerCamelCase ,prev_timestep=lowerCamelCase ,) if self.variance_type == "fixed_small_log": __SCREAMING_SNAKE_CASE = variance elif self.variance_type == "learned_range": __SCREAMING_SNAKE_CASE = (0.5 * variance).exp() else: raise ValueError( f"""variance_type given as {self.variance_type} must be one of `fixed_small_log` or `learned_range`""" """ for the UnCLIPScheduler.""" ) __SCREAMING_SNAKE_CASE = variance * variance_noise __SCREAMING_SNAKE_CASE = pred_prev_sample + variance if not return_dict: return (pred_prev_sample,) return UnCLIPSchedulerOutput(prev_sample=lowerCamelCase ,pred_original_sample=lowerCamelCase ) def UpperCAmelCase__ ( self : Any ,lowerCamelCase : torch.FloatTensor ,lowerCamelCase : torch.FloatTensor ,lowerCamelCase : torch.IntTensor ,): '''simple docstring''' __SCREAMING_SNAKE_CASE = self.alphas_cumprod.to(device=original_samples.device ,dtype=original_samples.dtype ) __SCREAMING_SNAKE_CASE = timesteps.to(original_samples.device ) __SCREAMING_SNAKE_CASE = alphas_cumprod[timesteps] ** 0.5 __SCREAMING_SNAKE_CASE = sqrt_alpha_prod.flatten() while len(sqrt_alpha_prod.shape ) < len(original_samples.shape ): __SCREAMING_SNAKE_CASE = sqrt_alpha_prod.unsqueeze(-1 ) __SCREAMING_SNAKE_CASE = (1 - alphas_cumprod[timesteps]) ** 0.5 __SCREAMING_SNAKE_CASE = sqrt_one_minus_alpha_prod.flatten() while len(sqrt_one_minus_alpha_prod.shape ) < len(original_samples.shape ): __SCREAMING_SNAKE_CASE = sqrt_one_minus_alpha_prod.unsqueeze(-1 ) __SCREAMING_SNAKE_CASE = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise return noisy_samples
109
import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = FunnelTokenizer lowercase_ = FunnelTokenizerFast lowercase_ = True lowercase_ = True def __lowercase( self : Union[str, Any] )-> Tuple: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ : str = [ '<unk>', '<cls>', '<sep>', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def __lowercase( self : Any , **a_ : Any )-> List[str]: """simple docstring""" return FunnelTokenizer.from_pretrained(self.tmpdirname , **a_ ) def __lowercase( self : Tuple , **a_ : List[Any] )-> List[Any]: """simple docstring""" return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **a_ ) def __lowercase( self : Optional[Any] , a_ : List[str] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'UNwant\u00E9d,running' SCREAMING_SNAKE_CASE__ : int = 'unwanted, running' return input_text, output_text def __lowercase( self : Optional[Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer_class(self.vocab_file ) SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(a_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [7, 4, 5, 10, 8, 9] ) def __lowercase( self : List[Any] )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_tokenizers(do_lower_case=a_ ) for tokenizer in tokenizers: SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer('UNwant\u00E9d,running' ) SCREAMING_SNAKE_CASE__ : List[Any] = len(inputs['input_ids'] ) - 1 self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer('UNwant\u00E9d,running' , 'UNwant\u00E9d,running' ) self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len + [1] * sentence_len )
85
0
"""simple docstring""" from typing import TYPE_CHECKING # rely on isort to merge the imports from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available UpperCamelCase__ = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: UpperCamelCase__ = [ 'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST', 'FocalNetForImageClassification', 'FocalNetForMaskedImageModeling', 'FocalNetBackbone', 'FocalNetModel', 'FocalNetPreTrainedModel', ] if TYPE_CHECKING: from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_focalnet import ( FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST, FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, FocalNetPreTrainedModel, ) else: import sys UpperCamelCase__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
110
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Any = { "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json", # See all LeViT models at https://huggingface.co/models?filter=levit } class snake_case ( UpperCamelCase_ ): lowercase_ = 'levit' def __init__( self : str , a_ : Optional[Any]=224 , a_ : List[str]=3 , a_ : Any=3 , a_ : Any=2 , a_ : Tuple=1 , a_ : int=16 , a_ : Optional[int]=[128, 256, 384] , a_ : Dict=[4, 8, 12] , a_ : List[str]=[4, 4, 4] , a_ : Any=[16, 16, 16] , a_ : Dict=0 , a_ : Tuple=[2, 2, 2] , a_ : Union[str, Any]=[2, 2, 2] , a_ : Optional[Any]=0.02 , **a_ : str , )-> Any: """simple docstring""" super().__init__(**a_ ) SCREAMING_SNAKE_CASE__ : Any = image_size SCREAMING_SNAKE_CASE__ : List[Any] = num_channels SCREAMING_SNAKE_CASE__ : Any = kernel_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = stride SCREAMING_SNAKE_CASE__ : Any = padding SCREAMING_SNAKE_CASE__ : Any = hidden_sizes SCREAMING_SNAKE_CASE__ : List[Any] = num_attention_heads SCREAMING_SNAKE_CASE__ : Optional[Any] = depths SCREAMING_SNAKE_CASE__ : List[str] = key_dim SCREAMING_SNAKE_CASE__ : int = drop_path_rate SCREAMING_SNAKE_CASE__ : List[str] = patch_size SCREAMING_SNAKE_CASE__ : List[str] = attention_ratio SCREAMING_SNAKE_CASE__ : Tuple = mlp_ratio SCREAMING_SNAKE_CASE__ : str = initializer_range SCREAMING_SNAKE_CASE__ : List[Any] = [ ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class snake_case ( UpperCamelCase_ ): lowercase_ = version.parse('1.11' ) @property def __lowercase( self : str )-> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def __lowercase( self : Any )-> float: """simple docstring""" return 1e-4
85
0
'''simple docstring''' def UpperCamelCase_ ( A__ : int , A__ : float , A__ : float ): '''simple docstring''' return round(float(moles / volume ) * nfactor ) def UpperCamelCase_ ( A__ : float , A__ : float , A__ : float ): '''simple docstring''' return round(float((moles * 0.0821 * temperature) / (volume) ) ) def UpperCamelCase_ ( A__ : float , A__ : float , A__ : float ): '''simple docstring''' return round(float((moles * 0.0821 * temperature) / (pressure) ) ) def UpperCamelCase_ ( A__ : float , A__ : float , A__ : float ): '''simple docstring''' return round(float((pressure * volume) / (0.0821 * moles) ) ) if __name__ == "__main__": import doctest doctest.testmod()
275
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class snake_case ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase_ = StableDiffusionInstructPixaPixPipeline lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'} lowercase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS def __lowercase( self : str )-> int: """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) SCREAMING_SNAKE_CASE__ : List[str] = PNDMScheduler(skip_prk_steps=a_ ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) SCREAMING_SNAKE_CASE__ : int = CLIPTextModel(a_ ) SCREAMING_SNAKE_CASE__ : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) SCREAMING_SNAKE_CASE__ : List[str] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def __lowercase( self : List[Any] , a_ : Tuple , a_ : Optional[Any]=0 )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ ) SCREAMING_SNAKE_CASE__ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE__ : List[Any] = Image.fromarray(np.uinta(a_ ) ).convert('RGB' ) if str(a_ ).startswith('mps' ): SCREAMING_SNAKE_CASE__ : str = torch.manual_seed(a_ ) else: SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Generator(device=a_ ).manual_seed(a_ ) SCREAMING_SNAKE_CASE__ : Dict = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'image_guidance_scale': 1, 'output_type': 'numpy', } return inputs def __lowercase( self : str )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Tuple = self.get_dummy_inputs(a_ ) SCREAMING_SNAKE_CASE__ : int = sd_pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE__ : Dict = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase( self : Optional[Any] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE__ : Dict = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_inputs(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = 'french fries' SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe(**a_ , negative_prompt=a_ ) SCREAMING_SNAKE_CASE__ : Dict = output.images SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase( self : List[Any] )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : int = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [inputs['prompt']] * 2 SCREAMING_SNAKE_CASE__ : List[str] = np.array(inputs['image'] ).astype(np.floataa ) / 255.0 SCREAMING_SNAKE_CASE__ : Tuple = torch.from_numpy(a_ ).unsqueeze(0 ).to(a_ ) SCREAMING_SNAKE_CASE__ : Dict = image / 2 + 0.5 SCREAMING_SNAKE_CASE__ : Tuple = image.permute(0 , 3 , 1 , 2 ) SCREAMING_SNAKE_CASE__ : int = image.repeat(2 , 1 , 1 , 1 ) SCREAMING_SNAKE_CASE__ : Optional[int] = sd_pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : Any = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) SCREAMING_SNAKE_CASE__ : int = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase( self : List[Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE__ : str = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Optional[Any] = EulerAncestralDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' ) SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : Dict = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_inputs(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = sd_pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : Any = [round(a_ , 4 ) for x in image_slice.flatten().tolist()] print(','.join([str(a_ ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE__ : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase( self : Union[str, Any] )-> Any: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def __lowercase( self : List[Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : int = VaeImageProcessor(do_resize=a_ , do_normalize=a_ ) SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Any = pipe(**self.get_dummy_inputs_by_type(a_ , input_image_type='pt' ) )[0] SCREAMING_SNAKE_CASE__ : Optional[int] = components['vae'] SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs_by_type(a_ , input_image_type='pt' ) for image_param in self.image_latents_params: if image_param in inputs.keys(): SCREAMING_SNAKE_CASE__ : Union[str, Any] = vae.encode(inputs[image_param] ).latent_dist.mode() SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(**a_ )[0] SCREAMING_SNAKE_CASE__ : List[Any] = np.abs(out - out_latents_inputs ).max() self.assertLess(a_ , 1e-4 , 'passing latents as image input generate different result from passing image' ) @slow @require_torch_gpu class snake_case ( unittest.TestCase ): def __lowercase( self : Tuple )-> Dict: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase( self : List[Any] , a_ : Dict=0 )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = torch.manual_seed(a_ ) SCREAMING_SNAKE_CASE__ : List[str] = load_image( 'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' ) SCREAMING_SNAKE_CASE__ : Tuple = { 'prompt': 'turn him into a cyborg', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'image_guidance_scale': 1.0, 'output_type': 'numpy', } return inputs def __lowercase( self : int )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : str = self.get_inputs() SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __lowercase( self : Dict )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ ) SCREAMING_SNAKE_CASE__ : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs() SCREAMING_SNAKE_CASE__ : Dict = pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : Optional[int] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ : List[Any] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __lowercase( self : Optional[int] )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ ) SCREAMING_SNAKE_CASE__ : Dict = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : str = self.get_inputs() SCREAMING_SNAKE_CASE__ : Tuple = pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __lowercase( self : int )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = 0 def callback_fn(a_ : int , a_ : int , a_ : torch.FloatTensor ) -> None: SCREAMING_SNAKE_CASE__ : Tuple = True nonlocal number_of_steps number_of_steps += 1 if step == 1: SCREAMING_SNAKE_CASE__ : Union[str, Any] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) SCREAMING_SNAKE_CASE__ : List[Any] = latents[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: SCREAMING_SNAKE_CASE__ : Optional[int] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) SCREAMING_SNAKE_CASE__ : Tuple = latents[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : Dict = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 SCREAMING_SNAKE_CASE__ : List[str] = False SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs() pipe(**a_ , callback=a_ , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def __lowercase( self : int )-> Any: """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs() SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(**a_ ) SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def __lowercase( self : Tuple )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 SCREAMING_SNAKE_CASE__ : Dict = inputs['image'].resize((504, 504) ) SCREAMING_SNAKE_CASE__ : List[Any] = 'timbrooks/instruct-pix2pix' SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained( a_ , safety_checker=a_ , ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : Any = pipe(**a_ ) SCREAMING_SNAKE_CASE__ : List[str] = output.images[0] SCREAMING_SNAKE_CASE__ : Any = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) SCREAMING_SNAKE_CASE__ : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
85
0
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging lowercase : List[Any] = logging.get_logger(__name__) class __snake_case ( UpperCamelCase_ ): _a : Optional[int]= ["pixel_values"] def __init__( self ,snake_case = True ,snake_case = 1 / 255 ,snake_case = True ,snake_case = 8 ,**snake_case ,): '''simple docstring''' super().__init__(**a_ ) lowercase : List[str] = do_rescale lowercase : Union[str, Any] = rescale_factor lowercase : Dict = do_pad lowercase : Any = pad_size def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case = None ,**snake_case ): '''simple docstring''' return rescale(a_ ,scale=a_ ,data_format=a_ ,**a_ ) def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case = None ): '''simple docstring''' lowercase : str = get_image_size(a_ ) lowercase : Tuple = (old_height // size + 1) * size - old_height lowercase : List[Any] = (old_width // size + 1) * size - old_width return pad(a_ ,((0, pad_height), (0, pad_width)) ,mode="""symmetric""" ,data_format=a_ ) def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = None ,snake_case = ChannelDimension.FIRST ,**snake_case ,): '''simple docstring''' lowercase : int = do_rescale if do_rescale is not None else self.do_rescale lowercase : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor lowercase : List[str] = do_pad if do_pad is not None else self.do_pad lowercase : List[str] = pad_size if pad_size is not None else self.pad_size lowercase : Tuple = make_list_of_images(a_ ) if not valid_images(a_ ): raise ValueError( """Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """ """torch.Tensor, tf.Tensor or jax.ndarray.""" ) if do_rescale and rescale_factor is None: raise ValueError("""Rescale factor must be specified if do_rescale is True.""" ) # All transformations expect numpy arrays. lowercase : List[str] = [to_numpy_array(a_ ) for image in images] if do_rescale: lowercase : Union[str, Any] = [self.rescale(image=a_ ,scale=a_ ) for image in images] if do_pad: lowercase : str = [self.pad(a_ ,size=a_ ) for image in images] lowercase : List[str] = [to_channel_dimension_format(a_ ,a_ ) for image in images] lowercase : Tuple = {'pixel_values': images} return BatchFeature(data=a_ ,tensor_type=a_ )
336
import math from collections.abc import Callable def _a ( lowercase__ : Callable[[float], float] , lowercase__ : float , lowercase__ : float ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : float = xa SCREAMING_SNAKE_CASE__ : float = xa while True: if x_n == x_na or function(lowercase__ ) == function(lowercase__ ): raise ZeroDivisionError('float division by zero, could not find root' ) SCREAMING_SNAKE_CASE__ : float = x_na - ( function(lowercase__ ) / ((function(lowercase__ ) - function(lowercase__ )) / (x_na - x_n)) ) if abs(x_na - x_na ) < 10**-5: return x_na SCREAMING_SNAKE_CASE__ : Dict = x_na SCREAMING_SNAKE_CASE__ : List[str] = x_na def _a ( lowercase__ : float ): '''simple docstring''' return math.pow(lowercase__ , 3 ) - (2 * x) - 5 if __name__ == "__main__": print(intersection(f, 3, 3.5))
85
0
"""simple docstring""" from argparse import ArgumentParser from .add_new_model import AddNewModelCommand from .add_new_model_like import AddNewModelLikeCommand from .convert import ConvertCommand from .download import DownloadCommand from .env import EnvironmentCommand from .lfs import LfsCommands from .pt_to_tf import PTtoTFCommand from .run import RunCommand from .serving import ServeCommand from .user import UserCommands def lowercase () -> List[str]: '''simple docstring''' __UpperCamelCase = ArgumentParser("Transformers CLI tool" ,usage="transformers-cli <command> [<args>]" ) __UpperCamelCase = parser.add_subparsers(help="transformers-cli command helpers" ) # Register commands ConvertCommand.register_subcommand(lowercase__ ) DownloadCommand.register_subcommand(lowercase__ ) EnvironmentCommand.register_subcommand(lowercase__ ) RunCommand.register_subcommand(lowercase__ ) ServeCommand.register_subcommand(lowercase__ ) UserCommands.register_subcommand(lowercase__ ) AddNewModelCommand.register_subcommand(lowercase__ ) AddNewModelLikeCommand.register_subcommand(lowercase__ ) LfsCommands.register_subcommand(lowercase__ ) PTtoTFCommand.register_subcommand(lowercase__ ) # Let's go __UpperCamelCase = parser.parse_args() if not hasattr(lowercase__ ,"func" ): parser.print_help() exit(1 ) # Run __UpperCamelCase = args.func(lowercase__ ) service.run() if __name__ == "__main__": main()
505
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class snake_case ( UpperCamelCase_ ): lowercase_ = ['image_processor', 'tokenizer'] lowercase_ = 'AutoImageProcessor' lowercase_ = 'AutoTokenizer' def __init__( self : List[Any] , a_ : int , a_ : Union[str, Any] )-> List[Any]: """simple docstring""" super().__init__(a_ , a_ ) SCREAMING_SNAKE_CASE__ : str = self.image_processor def __call__( self : Tuple , a_ : str=None , a_ : List[Any]=None , a_ : Optional[Any]=None , **a_ : Dict )-> Tuple: """simple docstring""" if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: SCREAMING_SNAKE_CASE__ : Any = self.tokenizer(a_ , return_tensors=a_ , **a_ ) if images is not None: SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processor(a_ , return_tensors=a_ , **a_ ) if text is not None and images is not None: SCREAMING_SNAKE_CASE__ : List[str] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ ) def __lowercase( self : Dict , *a_ : Any , **a_ : Any )-> List[Any]: """simple docstring""" return self.tokenizer.batch_decode(*a_ , **a_ ) def __lowercase( self : Dict , *a_ : Union[str, Any] , **a_ : Optional[int] )-> Dict: """simple docstring""" return self.tokenizer.decode(*a_ , **a_ ) @property def __lowercase( self : Any )-> Any: """simple docstring""" return ["input_ids", "attention_mask", "pixel_values"]
85
0
import argparse import json import os import time import zipfile from get_ci_error_statistics import download_artifact, get_artifacts_links from transformers import logging _lowercase : Any =logging.get_logger(__name__) def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowerCamelCase_ : Tuple = set() lowerCamelCase_ : Any = [] def parse_line(lowerCAmelCase__ ): for line in fp: if isinstance(lowercase__ ,lowercase__ ): lowerCamelCase_ : int = line.decode('UTF-8' ) if "warnings summary (final)" in line: continue # This means we are outside the body of a warning elif not line.startswith(' ' ): # process a single warning and move it to `selected_warnings`. if len(lowercase__ ) > 0: lowerCamelCase_ : Optional[int] = '\n'.join(lowercase__ ) # Only keep the warnings specified in `targets` if any(F": {x}: " in warning for x in targets ): selected_warnings.add(lowercase__ ) buffer.clear() continue else: lowerCamelCase_ : Optional[int] = line.strip() buffer.append(lowercase__ ) if from_gh: for filename in os.listdir(lowercase__ ): lowerCamelCase_ : List[Any] = os.path.join(lowercase__ ,lowercase__ ) if not os.path.isdir(lowercase__ ): # read the file if filename != "warnings.txt": continue with open(lowercase__ ) as fp: parse_line(lowercase__ ) else: try: with zipfile.ZipFile(lowercase__ ) as z: for filename in z.namelist(): if not os.path.isdir(lowercase__ ): # read the file if filename != "warnings.txt": continue with z.open(lowercase__ ) as fp: parse_line(lowercase__ ) except Exception: logger.warning( F"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped." ) return selected_warnings def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowerCamelCase_ : Optional[int] = set() lowerCamelCase_ : Optional[int] = [os.path.join(lowercase__ ,lowercase__ ) for p in os.listdir(lowercase__ ) if (p.endswith('.zip' ) or from_gh)] for p in paths: selected_warnings.update(extract_warnings_from_single_artifact(lowercase__ ,lowercase__ ) ) return selected_warnings if __name__ == "__main__": def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ): return values.split(',' ) _lowercase : Any =argparse.ArgumentParser() # Required parameters parser.add_argument("""--workflow_run_id""", type=str, required=True, help="""A GitHub Actions workflow run id.""") parser.add_argument( """--output_dir""", type=str, required=True, help="""Where to store the downloaded artifacts and other result files.""", ) parser.add_argument("""--token""", default=None, type=str, help="""A token that has actions:read permission.""") # optional parameters parser.add_argument( """--targets""", default="""DeprecationWarning,UserWarning,FutureWarning""", type=list_str, help="""Comma-separated list of target warning(s) which we want to extract.""", ) parser.add_argument( """--from_gh""", action="""store_true""", help="""If running from a GitHub action workflow and collecting warnings from its artifacts.""", ) _lowercase : Dict =parser.parse_args() _lowercase : Any =args.from_gh if from_gh: # The artifacts have to be downloaded using `actions/download-artifact@v3` pass else: os.makedirs(args.output_dir, exist_ok=True) # get download links _lowercase : Dict =get_artifacts_links(args.workflow_run_id, token=args.token) with open(os.path.join(args.output_dir, """artifacts.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(artifacts, fp, ensure_ascii=False, indent=4) # download artifacts for idx, (name, url) in enumerate(artifacts.items()): print(name) print(url) print("""=""" * 80) download_artifact(name, url, args.output_dir, args.token) # Be gentle to GitHub time.sleep(1) # extract warnings from artifacts _lowercase : Any =extract_warnings(args.output_dir, args.targets) _lowercase : Any =sorted(selected_warnings) with open(os.path.join(args.output_dir, """selected_warnings.json"""), """w""", encoding="""UTF-8""") as fp: json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
364
import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def _a ( lowercase__ : int = 3 ): '''simple docstring''' if isinstance(lowercase__ , lowercase__ ): raise TypeError('number of qubits must be a integer.' ) if number_of_qubits <= 0: raise ValueError('number of qubits must be > 0.' ) if math.floor(lowercase__ ) != number_of_qubits: raise ValueError('number of qubits must be exact integer.' ) if number_of_qubits > 10: raise ValueError('number of qubits too large to simulate(>10).' ) SCREAMING_SNAKE_CASE__ : Tuple = QuantumRegister(lowercase__ , 'qr' ) SCREAMING_SNAKE_CASE__ : int = ClassicalRegister(lowercase__ , 'cr' ) SCREAMING_SNAKE_CASE__ : Tuple = QuantumCircuit(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE__ : Tuple = number_of_qubits for i in range(lowercase__ ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(lowercase__ ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , lowercase__ , lowercase__ ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(lowercase__ , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(lowercase__ , lowercase__ ) # simulate with 10000 shots SCREAMING_SNAKE_CASE__ : Optional[int] = Aer.get_backend('qasm_simulator' ) SCREAMING_SNAKE_CASE__ : Tuple = execute(lowercase__ , lowercase__ , shots=1_00_00 ) return job.result().get_counts(lowercase__ ) if __name__ == "__main__": print( F"""Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}""" )
85
0
'''simple docstring''' import unittest from transformers import PegasusConfig, PegasusTokenizer, is_flax_available from transformers.testing_utils import require_flax, slow from ...test_configuration_common import ConfigTester from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor if is_flax_available(): import os # The slow tests are often failing with OOM error on GPU # This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed # but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html lowercase : Dict = "platform" import jax import jax.numpy as jnp import numpy as np from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel @require_flax class _a : '''simple docstring''' lowerCAmelCase_ : str = PegasusConfig lowerCAmelCase_ : Optional[int] = {} lowerCAmelCase_ : Dict = """gelu""" def __init__( self ,__a ,__a=13 ,__a=7 ,__a=True ,__a=False ,__a=99 ,__a=32 ,__a=5 ,__a=4 ,__a=37 ,__a=0.1 ,__a=0.1 ,__a=20 ,__a=2 ,__a=1 ,__a=0 ,) -> List[Any]: snake_case : Optional[Any] = parent snake_case : Tuple = batch_size snake_case : str = seq_length snake_case : int = is_training snake_case : str = use_labels snake_case : int = vocab_size snake_case : Any = hidden_size snake_case : Optional[Any] = num_hidden_layers snake_case : Any = num_attention_heads snake_case : List[str] = intermediate_size snake_case : str = hidden_dropout_prob snake_case : List[Any] = attention_probs_dropout_prob snake_case : Any = max_position_embeddings snake_case : Dict = eos_token_id snake_case : Dict = pad_token_id snake_case : Any = bos_token_id def snake_case_ ( self ) -> str: snake_case : Optional[int] = ids_tensor([self.batch_size, self.seq_length - 1] ,self.vocab_size ).clip(3 ,self.vocab_size ) snake_case : Optional[int] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) ,1 ) snake_case : Dict = np.concatenate([input_ids, eos_tensor] ,axis=1 ) snake_case : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size ) snake_case : Any = self.config_cls( vocab_size=self.vocab_size ,d_model=self.hidden_size ,encoder_layers=self.num_hidden_layers ,decoder_layers=self.num_hidden_layers ,encoder_attention_heads=self.num_attention_heads ,decoder_attention_heads=self.num_attention_heads ,encoder_ffn_dim=self.intermediate_size ,decoder_ffn_dim=self.intermediate_size ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,eos_token_ids=[2] ,bos_token_id=self.bos_token_id ,pad_token_id=self.pad_token_id ,decoder_start_token_id=self.pad_token_id ,**self.config_updates ,) snake_case : Optional[int] = prepare_pegasus_inputs_dict(a_ ,a_ ,a_ ) return config, inputs_dict def snake_case_ ( self ,__a ,__a ,__a ) -> Union[str, Any]: snake_case : Tuple = 20 snake_case : int = model_class_name(a_ ) snake_case : Tuple = model.encode(inputs_dict["""input_ids"""] ) snake_case : int = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) snake_case : List[str] = model.init_cache(decoder_input_ids.shape[0] ,a_ ,a_ ) snake_case : List[Any] = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) ,dtype="""i4""" ) snake_case : List[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,) snake_case : str = model.decode( decoder_input_ids[:, :-1] ,a_ ,decoder_attention_mask=a_ ,past_key_values=a_ ,decoder_position_ids=a_ ,) snake_case : Union[str, Any] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" ) snake_case : Optional[Any] = model.decode( decoder_input_ids[:, -1:] ,a_ ,decoder_attention_mask=a_ ,past_key_values=outputs_cache.past_key_values ,decoder_position_ids=a_ ,) snake_case : Tuple = model.decode(a_ ,a_ ) snake_case : Union[str, Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 ,msg=F'''Max diff is {diff}''' ) def snake_case_ ( self ,__a ,__a ,__a ) -> str: snake_case : Optional[Any] = 20 snake_case : str = model_class_name(a_ ) snake_case : List[Any] = model.encode(inputs_dict["""input_ids"""] ) snake_case : Tuple = ( inputs_dict['decoder_input_ids'], inputs_dict['decoder_attention_mask'], ) snake_case : Any = jnp.concatenate( [ decoder_attention_mask, jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ), ] ,axis=-1 ,) snake_case : str = model.init_cache(decoder_input_ids.shape[0] ,a_ ,a_ ) snake_case : Optional[Any] = jnp.broadcast_to( jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] ,(decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) ,) snake_case : Dict = model.decode( decoder_input_ids[:, :-1] ,a_ ,decoder_attention_mask=a_ ,past_key_values=a_ ,decoder_position_ids=a_ ,) snake_case : Optional[int] = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] ,dtype="""i4""" ) snake_case : int = model.decode( decoder_input_ids[:, -1:] ,a_ ,past_key_values=outputs_cache.past_key_values ,decoder_attention_mask=a_ ,decoder_position_ids=a_ ,) snake_case : Any = model.decode(a_ ,a_ ,decoder_attention_mask=a_ ) snake_case : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) ) self.parent.assertTrue(diff < 1E-3 ,msg=F'''Max diff is {diff}''' ) def lowerCamelCase__ ( __lowercase , __lowercase , __lowercase , __lowercase=None , __lowercase=None , ): if attention_mask is None: snake_case : Union[str, Any] = np.not_equal(lowercase__ , config.pad_token_id ).astype(np.inta ) if decoder_attention_mask is None: snake_case : str = np.concatenate( [ np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ), np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ), ] , axis=-1 , ) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_flax class _a (UpperCamelCase_, unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ : str = ( ( FlaxPegasusForConditionalGeneration, FlaxPegasusModel, ) if is_flax_available() else () ) lowerCAmelCase_ : int = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else () lowerCAmelCase_ : Any = True lowerCAmelCase_ : List[str] = False lowerCAmelCase_ : Optional[Any] = False lowerCAmelCase_ : Optional[Any] = False def snake_case_ ( self ) -> str: snake_case : List[Any] = FlaxPegasusModelTester(self ) snake_case : Dict = ConfigTester(self ,config_class=a_ ) def snake_case_ ( self ) -> int: self.config_tester.run_common_tests() def snake_case_ ( self ) -> Tuple: snake_case : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward(a_ ,a_ ,a_ ) def snake_case_ ( self ) -> Tuple: snake_case : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: self.model_tester.check_use_cache_forward_with_attn_mask(a_ ,a_ ,a_ ) def snake_case_ ( self ) -> Any: snake_case : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): snake_case : Tuple = self._prepare_for_class(a_ ,a_ ) snake_case : List[Any] = model_class(a_ ) @jax.jit def encode_jitted(__a ,__a=None ,**__a ): return model.encode(input_ids=a_ ,attention_mask=a_ ) with self.subTest("""JIT Enabled""" ): snake_case : str = encode_jitted(**a_ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): snake_case : Dict = encode_jitted(**a_ ).to_tuple() self.assertEqual(len(a_ ) ,len(a_ ) ) for jitted_output, output in zip(a_ ,a_ ): self.assertEqual(jitted_output.shape ,output.shape ) def snake_case_ ( self ) -> Dict: snake_case : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: with self.subTest(model_class.__name__ ): snake_case : int = model_class(a_ ) snake_case : str = model.encode(inputs_dict["""input_ids"""] ,inputs_dict["""attention_mask"""] ) snake_case : List[str] = { 'decoder_input_ids': inputs_dict['decoder_input_ids'], 'decoder_attention_mask': inputs_dict['decoder_attention_mask'], 'encoder_outputs': encoder_outputs, } @jax.jit def decode_jitted(__a ,__a ,__a ): return model.decode( decoder_input_ids=a_ ,decoder_attention_mask=a_ ,encoder_outputs=a_ ,) with self.subTest("""JIT Enabled""" ): snake_case : Any = decode_jitted(**a_ ).to_tuple() with self.subTest("""JIT Disabled""" ): with jax.disable_jit(): snake_case : List[Any] = decode_jitted(**a_ ).to_tuple() self.assertEqual(len(a_ ) ,len(a_ ) ) for jitted_output, output in zip(a_ ,a_ ): self.assertEqual(jitted_output.shape ,output.shape ) @slow def snake_case_ ( self ) -> int: for model_class_name in self.all_model_classes: snake_case : int = model_class_name.from_pretrained("""google/pegasus-large""" ,from_pt=a_ ) snake_case : Dict = np.ones((1, 1) ) snake_case : List[Any] = model(a_ ) self.assertIsNotNone(a_ ) @slow def snake_case_ ( self ) -> Union[str, Any]: snake_case : str = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" ) snake_case : Optional[Any] = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" ) snake_case : List[Any] = [ ' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.', ' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ', ] snake_case : Union[str, Any] = [ 'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.', 'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.', ] snake_case : int = tokenizer(a_ ,return_tensors="""np""" ,truncation=a_ ,max_length=512 ,padding=a_ ) snake_case : List[str] = model.generate(**a_ ,num_beams=2 ).sequences snake_case : Optional[int] = tokenizer.batch_decode(a_ ,skip_special_tokens=a_ ) assert tgt_text == decoded
116
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="%(message)s") def _a ( lowercase__ : np.ndarray ): '''simple docstring''' return input_array.reshape((input_array.size, 1) ) def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray , lowercase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = np.nan for i in range(lowercase__ ): SCREAMING_SNAKE_CASE__ : int = features[:, labels == i] SCREAMING_SNAKE_CASE__ : int = data.mean(1 ) # Centralize the data of class i SCREAMING_SNAKE_CASE__ : Optional[Any] = data - column_reshape(lowercase__ ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(lowercase__ , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) SCREAMING_SNAKE_CASE__ : Any = np.dot(lowercase__ , centered_data.T ) return covariance_sum / features.shape[1] def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray , lowercase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = features.mean(1 ) SCREAMING_SNAKE_CASE__ : List[str] = np.nan for i in range(lowercase__ ): SCREAMING_SNAKE_CASE__ : Tuple = features[:, labels == i] SCREAMING_SNAKE_CASE__ : int = data.shape[1] SCREAMING_SNAKE_CASE__ : List[Any] = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(lowercase__ ) - column_reshape(lowercase__ ) , (column_reshape(lowercase__ ) - column_reshape(lowercase__ )).T , ) else: # If covariance_sum is np.nan (i.e. first loop) SCREAMING_SNAKE_CASE__ : str = device_data * np.dot( column_reshape(lowercase__ ) - column_reshape(lowercase__ ) , (column_reshape(lowercase__ ) - column_reshape(lowercase__ )).T , ) return covariance_sum / features.shape[1] def _a ( lowercase__ : np.ndarray , lowercase__ : int ): '''simple docstring''' if features.any(): SCREAMING_SNAKE_CASE__ : Any = features.mean(1 ) # Center the dataset SCREAMING_SNAKE_CASE__ : Optional[Any] = features - np.reshape(lowercase__ , (data_mean.size, 1) ) SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(lowercase__ , centered_data.T ) / features.shape[1] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = np.linalg.eigh(lowercase__ ) # Take all the columns in the reverse order (-1), and then takes only the first SCREAMING_SNAKE_CASE__ : List[Any] = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.dot(filtered_eigenvectors.T , lowercase__ ) logging.info('Principal Component Analysis computed' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=lowercase__ ) logging.error('Dataset empty' ) raise AssertionError def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray , lowercase__ : int , lowercase__ : int ): '''simple docstring''' assert classes > dimensions # Check if features have been already loaded if features.any: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = eigh( covariance_between_classes(lowercase__ , lowercase__ , lowercase__ ) , covariance_within_classes(lowercase__ , lowercase__ , lowercase__ ) , ) SCREAMING_SNAKE_CASE__ : Tuple = eigenvectors[:, ::-1][:, :dimensions] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = np.linalg.svd(lowercase__ ) SCREAMING_SNAKE_CASE__ : List[Any] = svd_matrix[:, 0:dimensions] SCREAMING_SNAKE_CASE__ : int = np.dot(filtered_svd_matrix.T , lowercase__ ) logging.info('Linear Discriminant Analysis computed' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=lowercase__ ) logging.error('Dataset empty' ) raise AssertionError def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) SCREAMING_SNAKE_CASE__ : Tuple = np.array([0, 0, 0, 1, 1] ) SCREAMING_SNAKE_CASE__ : str = 2 SCREAMING_SNAKE_CASE__ : Dict = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(lowercase__ ) as error_info: SCREAMING_SNAKE_CASE__ : Optional[int] = linear_discriminant_analysis( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) if isinstance(lowercase__ , np.ndarray ): raise AssertionError( 'Did not raise AssertionError for dimensions > classes' ) assert error_info.type is AssertionError def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) SCREAMING_SNAKE_CASE__ : List[str] = 2 SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] ) with pytest.raises(lowercase__ ) as error_info: SCREAMING_SNAKE_CASE__ : int = principal_component_analysis(lowercase__ , lowercase__ ) if not np.allclose(lowercase__ , lowercase__ ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
85
0
from typing import Any, Dict, List, Union from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_vision_available(): from PIL import Image from ..image_utils import load_image if is_torch_available(): import torch from transformers.modeling_outputs import BaseModelOutput from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING __magic_name__ : List[Any] = logging.get_logger(__name__) @add_end_docstrings(UpperCamelCase_ ) class SCREAMING_SNAKE_CASE__ (UpperCamelCase_ ): def __init__( self : List[Any] , **__lowerCamelCase : str ): """simple docstring""" super().__init__(**a_ ) if self.framework == "tf": raise ValueError(F"""The {self.__class__} is only available in PyTorch.""" ) requires_backends(self , '''vision''' ) self.check_model_type(a_ ) def __call__( self : Optional[Any] , __lowerCamelCase : Union[str, "Image.Image", List[Dict[str, Any]]] , __lowerCamelCase : Union[str, List[str]] = None , **__lowerCamelCase : Dict , ): """simple docstring""" if "text_queries" in kwargs: lowerCAmelCase__ = kwargs.pop('''text_queries''' ) if isinstance(a_ , (str, Image.Image) ): lowerCAmelCase__ = {'image': image, 'candidate_labels': candidate_labels} else: lowerCAmelCase__ = image lowerCAmelCase__ = super().__call__(a_ , **a_ ) return results def A__ ( self : Dict , **__lowerCamelCase : str ): """simple docstring""" lowerCAmelCase__ = {} if "threshold" in kwargs: lowerCAmelCase__ = kwargs['threshold'] if "top_k" in kwargs: lowerCAmelCase__ = kwargs['top_k'] return {}, {}, postprocess_params def A__ ( self : Dict , __lowerCamelCase : str ): """simple docstring""" lowerCAmelCase__ = load_image(inputs['''image'''] ) lowerCAmelCase__ = inputs['candidate_labels'] if isinstance(a_ , a_ ): lowerCAmelCase__ = candidate_labels.split(''',''' ) lowerCAmelCase__ = torch.tensor([[image.height, image.width]] , dtype=torch.intaa ) for i, candidate_label in enumerate(a_ ): lowerCAmelCase__ = self.tokenizer(a_ , return_tensors=self.framework ) lowerCAmelCase__ = self.image_processor(a_ , return_tensors=self.framework ) yield { "is_last": i == len(a_ ) - 1, "target_size": target_size, "candidate_label": candidate_label, **text_inputs, **image_features, } def A__ ( self : Any , __lowerCamelCase : Any ): """simple docstring""" lowerCAmelCase__ = model_inputs.pop('''target_size''' ) lowerCAmelCase__ = model_inputs.pop('''candidate_label''' ) lowerCAmelCase__ = model_inputs.pop('''is_last''' ) lowerCAmelCase__ = self.model(**a_ ) lowerCAmelCase__ = {'target_size': target_size, 'candidate_label': candidate_label, 'is_last': is_last, **outputs} return model_outputs def A__ ( self : List[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : Any=0.1 , __lowerCamelCase : Optional[int]=None ): """simple docstring""" lowerCAmelCase__ = [] for model_output in model_outputs: lowerCAmelCase__ = model_output['candidate_label'] lowerCAmelCase__ = BaseModelOutput(a_ ) lowerCAmelCase__ = self.image_processor.post_process_object_detection( outputs=a_ , threshold=a_ , target_sizes=model_output['''target_size'''] )[0] for index in outputs["scores"].nonzero(): lowerCAmelCase__ = outputs['scores'][index].item() lowerCAmelCase__ = self._get_bounding_box(outputs['''boxes'''][index][0] ) lowerCAmelCase__ = {'score': score, 'label': label, 'box': box} results.append(a_ ) lowerCAmelCase__ = sorted(a_ , key=lambda __lowerCamelCase : x["score"] , reverse=a_ ) if top_k: lowerCAmelCase__ = results[:top_k] return results def A__ ( self : Tuple , __lowerCamelCase : "torch.Tensor" ): """simple docstring""" if self.framework != "pt": raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''' ) lowerCAmelCase__ = box.int().tolist() lowerCAmelCase__ = { 'xmin': xmin, 'ymin': ymin, 'xmax': xmax, 'ymax': ymax, } return bbox
615
import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) SCREAMING_SNAKE_CASE__ : Optional[int] = logging.getLogger(__name__) SCREAMING_SNAKE_CASE__ : List[Any] = "Hello world! cécé herlolip" SCREAMING_SNAKE_CASE__ : Dict = namedtuple( "BertAbsConfig", [ "temp_dir", "large", "use_bert_emb", "finetune_bert", "encoder", "share_emb", "max_pos", "enc_layers", "enc_hidden_size", "enc_heads", "enc_ff_size", "enc_dropout", "dec_layers", "dec_hidden_size", "dec_heads", "dec_ff_size", "dec_dropout", ], ) def _a ( lowercase__ : List[str] , lowercase__ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = BertAbsConfig( temp_dir='.' , finetune_bert=lowercase__ , large=lowercase__ , share_emb=lowercase__ , use_bert_emb=lowercase__ , encoder='bert' , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.load(lowercase__ , lambda lowercase__ , lowercase__ : storage ) SCREAMING_SNAKE_CASE__ : Any = AbsSummarizer(lowercase__ , torch.device('cpu' ) , lowercase__ ) original.eval() SCREAMING_SNAKE_CASE__ : List[Any] = BertAbsSummarizer(lowercase__ , torch.device('cpu' ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info('convert the model' ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info('Make sure that the models\' outputs are identical' ) SCREAMING_SNAKE_CASE__ : Any = BertTokenizer.from_pretrained('bert-base-uncased' ) # prepare the model inputs SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.encode('This is sample éàalj\'-.' ) encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowercase__ )) ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor(lowercase__ ).unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode('This is sample 3 éàalj\'-.' ) decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowercase__ )) ) SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor(lowercase__ ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass SCREAMING_SNAKE_CASE__ : int = encoder_input_ids SCREAMING_SNAKE_CASE__ : Any = decoder_input_ids SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : Dict = None SCREAMING_SNAKE_CASE__ : str = None SCREAMING_SNAKE_CASE__ : List[str] = None SCREAMING_SNAKE_CASE__ : Optional[Any] = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. # We make sure that the outputs of the full stack are identical SCREAMING_SNAKE_CASE__ : Optional[Any] = original(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )[0] SCREAMING_SNAKE_CASE__ : Optional[int] = original.generator(lowercase__ ) SCREAMING_SNAKE_CASE__ : Tuple = new_model( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )[0] SCREAMING_SNAKE_CASE__ : List[Any] = new_model.generator(lowercase__ ) SCREAMING_SNAKE_CASE__ : Tuple = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print('Maximum absolute difference beween weights: {:.2f}'.format(lowercase__ ) ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print('Maximum absolute difference beween weights: {:.2f}'.format(lowercase__ ) ) SCREAMING_SNAKE_CASE__ : List[Any] = torch.allclose(lowercase__ , lowercase__ , atol=1E-3 ) if are_identical: logging.info('all weights are equal up to 1e-3' ) else: raise ValueError('the weights are different. The new model is likely different from the original one.' ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info('saving the model\'s state dictionary' ) torch.save( new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser() parser.add_argument( "--bertabs_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model.", ) SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
85
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging a = logging.get_logger(__name__) a = { "google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json", "google/bigbird-roberta-large": "https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json", "google/bigbird-base-trivia-itc": "https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json", # See all BigBird models at https://huggingface.co/models?filter=big_bird } class lowercase_ ( UpperCamelCase_ ): '''simple docstring''' UpperCAmelCase : List[Any] = '''big_bird''' def __init__( self : Any , _UpperCAmelCase : List[str]=50_358 , _UpperCAmelCase : Union[str, Any]=768 , _UpperCAmelCase : Optional[Any]=12 , _UpperCAmelCase : Any=12 , _UpperCAmelCase : List[str]=3_072 , _UpperCAmelCase : str="gelu_new" , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : str=0.1 , _UpperCAmelCase : Optional[int]=4_096 , _UpperCAmelCase : Tuple=2 , _UpperCAmelCase : str=0.02 , _UpperCAmelCase : str=1E-1_2 , _UpperCAmelCase : Any=True , _UpperCAmelCase : List[Any]=0 , _UpperCAmelCase : Union[str, Any]=1 , _UpperCAmelCase : List[Any]=2 , _UpperCAmelCase : List[str]=66 , _UpperCAmelCase : Optional[int]="block_sparse" , _UpperCAmelCase : int=True , _UpperCAmelCase : Dict=False , _UpperCAmelCase : Any=64 , _UpperCAmelCase : Tuple=3 , _UpperCAmelCase : Tuple=None , **_UpperCAmelCase : List[str] , ): super().__init__( pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ , sep_token_id=a_ , **a_ , ) _A = vocab_size _A = max_position_embeddings _A = hidden_size _A = num_hidden_layers _A = num_attention_heads _A = intermediate_size _A = hidden_act _A = hidden_dropout_prob _A = attention_probs_dropout_prob _A = initializer_range _A = type_vocab_size _A = layer_norm_eps _A = use_cache _A = rescale_embeddings _A = attention_type _A = use_bias _A = block_size _A = num_random_blocks _A = classifier_dropout class lowercase_ ( UpperCamelCase_ ): '''simple docstring''' @property def lowerCAmelCase_ ( self : List[Any] ): if self.task == "multiple-choice": _A = {0: 'batch', 1: 'choice', 2: 'sequence'} else: _A = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ] )
7
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case : def __init__( self : Tuple , a_ : int , a_ : Optional[int]=3 , a_ : Tuple=32 , a_ : Any=3 , a_ : Tuple=10 , a_ : Optional[int]=[10, 20, 30, 40] , a_ : List[Any]=[1, 1, 2, 1] , a_ : int=True , a_ : Optional[Any]=True , a_ : Any="relu" , a_ : int=3 , a_ : List[Any]=None , )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = parent SCREAMING_SNAKE_CASE__ : Optional[int] = batch_size SCREAMING_SNAKE_CASE__ : int = image_size SCREAMING_SNAKE_CASE__ : Tuple = num_channels SCREAMING_SNAKE_CASE__ : Tuple = embeddings_size SCREAMING_SNAKE_CASE__ : str = hidden_sizes SCREAMING_SNAKE_CASE__ : Optional[int] = depths SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_labels SCREAMING_SNAKE_CASE__ : Dict = hidden_act SCREAMING_SNAKE_CASE__ : Tuple = num_labels SCREAMING_SNAKE_CASE__ : List[Any] = scope SCREAMING_SNAKE_CASE__ : str = len(a_ ) def __lowercase( self : Union[str, Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : Any = None if self.use_labels: SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE__ : Tuple = self.get_config() return config, pixel_values, labels def __lowercase( self : str )-> str: """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def __lowercase( self : List[str] , a_ : int , a_ : Any , a_ : Optional[Any] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = TFRegNetModel(config=a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , training=a_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __lowercase( self : Union[str, Any] , a_ : Dict , a_ : int , a_ : Optional[Any] )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.num_labels SCREAMING_SNAKE_CASE__ : Tuple = TFRegNetForImageClassification(a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ , training=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowercase( self : List[str] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = config_and_inputs SCREAMING_SNAKE_CASE__ : Optional[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () lowercase_ = ( {'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification} if is_tf_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False def __lowercase( self : int )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = TFRegNetModelTester(self ) SCREAMING_SNAKE_CASE__ : int = ConfigTester(self , config_class=a_ , has_text_modality=a_ ) def __lowercase( self : List[Any] )-> Tuple: """simple docstring""" return @unittest.skip(reason='RegNet does not use inputs_embeds' ) def __lowercase( self : str )-> Optional[int]: """simple docstring""" pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) @slow def __lowercase( self : Any )-> List[Any]: """simple docstring""" super().test_keras_fit() @unittest.skip(reason='RegNet does not support input and output embeddings' ) def __lowercase( self : Any )-> List[Any]: """simple docstring""" pass def __lowercase( self : Tuple )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : List[Any] = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , a_ ) def __lowercase( self : str )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def __lowercase( self : List[Any] )-> Optional[Any]: """simple docstring""" def check_hidden_states_output(a_ : int , a_ : Union[str, Any] , a_ : Tuple ): SCREAMING_SNAKE_CASE__ : Any = model_class(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**self._prepare_for_class(a_ , a_ ) , training=a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(a_ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : Dict = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: SCREAMING_SNAKE_CASE__ : List[Any] = layer_type SCREAMING_SNAKE_CASE__ : Union[str, Any] = True check_hidden_states_output(a_ , a_ , a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE__ : int = True check_hidden_states_output(a_ , a_ , a_ ) def __lowercase( self : Optional[int] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(a_ : str , a_ : Tuple , a_ : Optional[int] , a_ : Union[str, Any]={} ): SCREAMING_SNAKE_CASE__ : int = model(a_ , return_dict=a_ , **a_ ) SCREAMING_SNAKE_CASE__ : str = model(a_ , return_dict=a_ , **a_ ).to_tuple() def recursive_check(a_ : List[Any] , a_ : int ): if isinstance(a_ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(a_ , a_ ): recursive_check(a_ , a_ ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(a_ , a_ ) ) , msg=( 'Tuple and dict output are not equal. Difference:' F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}''' ) , ) recursive_check(a_ , a_ ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ ) SCREAMING_SNAKE_CASE__ : int = self._prepare_for_class(a_ , a_ ) SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(a_ , a_ ) check_equivalence(a_ , a_ , a_ ) SCREAMING_SNAKE_CASE__ : List[str] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) check_equivalence(a_ , a_ , a_ ) SCREAMING_SNAKE_CASE__ : str = self._prepare_for_class(a_ , a_ ) SCREAMING_SNAKE_CASE__ : List[str] = self._prepare_for_class(a_ , a_ ) check_equivalence(a_ , a_ , a_ , {'output_hidden_states': True} ) SCREAMING_SNAKE_CASE__ : int = self._prepare_for_class(a_ , a_ , return_labels=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) check_equivalence(a_ , a_ , a_ , {'output_hidden_states': True} ) def __lowercase( self : str )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) @slow def __lowercase( self : Any )-> List[str]: """simple docstring""" for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Optional[int] = TFRegNetModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class snake_case ( unittest.TestCase ): @cached_property def __lowercase( self : List[Any] )-> int: """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __lowercase( self : Any )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) SCREAMING_SNAKE_CASE__ : List[Any] = self.default_image_processor SCREAMING_SNAKE_CASE__ : Any = prepare_img() SCREAMING_SNAKE_CASE__ : str = image_processor(images=a_ , return_tensors='tf' ) # forward pass SCREAMING_SNAKE_CASE__ : Tuple = model(**a_ , training=a_ ) # verify the logits SCREAMING_SNAKE_CASE__ : Optional[int] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , a_ ) SCREAMING_SNAKE_CASE__ : Any = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , a_ , atol=1e-4 )
85
0
def _a ( SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> Any: '''simple docstring''' return int((input_a, input_a).count(0 ) == 0 ) def _a ( ) -> Optional[int]: '''simple docstring''' assert and_gate(0 , 0 ) == 0 assert and_gate(0 , 1 ) == 0 assert and_gate(1 , 0 ) == 0 assert and_gate(1 , 1 ) == 1 if __name__ == "__main__": test_and_gate() print(and_gate(1, 0)) print(and_gate(0, 0)) print(and_gate(0, 1)) print(and_gate(1, 1))
663
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE__ : Optional[Any] = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[Any] = ["FNetTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[str] = ["FNetTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Tuple = [ "FNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FNetForMaskedLM", "FNetForMultipleChoice", "FNetForNextSentencePrediction", "FNetForPreTraining", "FNetForQuestionAnswering", "FNetForSequenceClassification", "FNetForTokenClassification", "FNetLayer", "FNetModel", "FNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
0
"""simple docstring""" import PIL.Image import PIL.ImageOps from packaging import version from PIL import Image if version.parse(version.parse(PIL.__version__).base_version) >= version.parse('''9.1.0'''): _SCREAMING_SNAKE_CASE : Optional[int] = { "linear": PIL.Image.Resampling.BILINEAR, "bilinear": PIL.Image.Resampling.BILINEAR, "bicubic": PIL.Image.Resampling.BICUBIC, "lanczos": PIL.Image.Resampling.LANCZOS, "nearest": PIL.Image.Resampling.NEAREST, } else: _SCREAMING_SNAKE_CASE : Optional[int] = { "linear": PIL.Image.LINEAR, "bilinear": PIL.Image.BILINEAR, "bicubic": PIL.Image.BICUBIC, "lanczos": PIL.Image.LANCZOS, "nearest": PIL.Image.NEAREST, } def lowerCamelCase__ ( _lowerCamelCase : Union[str, Any] ) -> int: lowerCamelCase_ = (images / 2 + 0.5).clamp(0 , 1 ) lowerCamelCase_ = images.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() lowerCamelCase_ = numpy_to_pil(lowercase__ ) return images def lowerCamelCase__ ( _lowerCamelCase : Optional[Any] ) -> Tuple: if images.ndim == 3: lowerCamelCase_ = images[None, ...] lowerCamelCase_ = (images * 255).round().astype('uint8' ) if images.shape[-1] == 1: # special case for grayscale (single channel) images lowerCamelCase_ = [Image.fromarray(image.squeeze() , mode='L' ) for image in images] else: lowerCamelCase_ = [Image.fromarray(lowercase__ ) for image in images] return pil_images
549
def _a ( lowercase__ : int , lowercase__ : list ): '''simple docstring''' _enforce_args(lowercase__ , lowercase__ ) if n == 0: return 0 SCREAMING_SNAKE_CASE__ : str = float('-inf' ) for i in range(1 , n + 1 ): SCREAMING_SNAKE_CASE__ : int = max( lowercase__ , prices[i - 1] + naive_cut_rod_recursive(n - i , lowercase__ ) ) return max_revue def _a ( lowercase__ : int , lowercase__ : list ): '''simple docstring''' _enforce_args(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE__ : str = [float('-inf' ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(lowercase__ , lowercase__ , lowercase__ ) def _a ( lowercase__ : int , lowercase__ : list , lowercase__ : list ): '''simple docstring''' if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: SCREAMING_SNAKE_CASE__ : List[str] = float('-inf' ) for i in range(1 , n + 1 ): SCREAMING_SNAKE_CASE__ : Any = max( lowercase__ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , lowercase__ , lowercase__ ) , ) SCREAMING_SNAKE_CASE__ : Tuple = max_revenue return max_rev[n] def _a ( lowercase__ : int , lowercase__ : list ): '''simple docstring''' _enforce_args(lowercase__ , lowercase__ ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. SCREAMING_SNAKE_CASE__ : Optional[int] = [float('-inf' ) for _ in range(n + 1 )] SCREAMING_SNAKE_CASE__ : int = 0 for i in range(1 , n + 1 ): SCREAMING_SNAKE_CASE__ : Optional[Any] = max_rev[i] for j in range(1 , i + 1 ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = max(lowercase__ , prices[j - 1] + max_rev[i - j] ) SCREAMING_SNAKE_CASE__ : Dict = max_revenue_i return max_rev[n] def _a ( lowercase__ : int , lowercase__ : list ): '''simple docstring''' if n < 0: SCREAMING_SNAKE_CASE__ : Tuple = f'''n must be greater than or equal to 0. Got n = {n}''' raise ValueError(lowercase__ ) if n > len(lowercase__ ): SCREAMING_SNAKE_CASE__ : Tuple = ( 'Each integral piece of rod must have a corresponding price. ' f'''Got n = {n} but length of prices = {len(lowercase__ )}''' ) raise ValueError(lowercase__ ) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = [6, 10, 12, 15, 20, 23] SCREAMING_SNAKE_CASE__ : Optional[int] = len(lowercase__ ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. SCREAMING_SNAKE_CASE__ : Optional[Any] = 36 SCREAMING_SNAKE_CASE__ : Tuple = top_down_cut_rod(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = bottom_up_cut_rod(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE__ : List[str] = naive_cut_rod_recursive(lowercase__ , lowercase__ ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
85
0
"""simple docstring""" from ....utils import logging lowerCamelCase_ = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE( UpperCamelCase_ ): def __init__( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=None ,SCREAMING_SNAKE_CASE__=20_48 ) -> Union[str, Any]: """simple docstring""" __SCREAMING_SNAKE_CASE :Dict = config.__dict__ __SCREAMING_SNAKE_CASE :List[str] = modal_hidden_size if num_labels: __SCREAMING_SNAKE_CASE :Optional[Any] = num_labels
498
import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model") SCREAMING_SNAKE_CASE__ : Optional[int] = get_tests_dir("fixtures/test_sentencepiece_bpe.model") SCREAMING_SNAKE_CASE__ : Any = "pt" if is_torch_available() else "tf" @require_sentencepiece @require_tokenizers class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = CamembertTokenizer lowercase_ = CamembertTokenizerFast lowercase_ = True lowercase_ = True def __lowercase( self : Tuple )-> str: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE__ : Dict = CamembertTokenizer(a_ ) tokenizer.save_pretrained(self.tmpdirname ) def __lowercase( self : Any )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = '<pad>' SCREAMING_SNAKE_CASE__ : int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ ) def __lowercase( self : Optional[Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>NOTUSED' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(a_ ) , 1004 ) def __lowercase( self : Union[str, Any] )-> Optional[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1005 ) def __lowercase( self : List[Any] )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = CamembertTokenizer(a_ ) tokenizer.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : int = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : str = 'I was born in 92000, and this is falsé.' SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = rust_tokenizer.encode(a_ ) self.assertListEqual(a_ , a_ ) SCREAMING_SNAKE_CASE__ : str = tokenizer.encode(a_ , add_special_tokens=a_ ) SCREAMING_SNAKE_CASE__ : List[str] = rust_tokenizer.encode(a_ , add_special_tokens=a_ ) self.assertListEqual(a_ , a_ ) # <unk> tokens are not the same for `rust` than for `slow`. # Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.convert_ids_to_tokens(a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = rust_tokenizer.tokenize(a_ ) self.assertListEqual(a_ , a_ ) def __lowercase( self : Union[str, Any] )-> str: """simple docstring""" if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE__ : Tuple = 'I was born in 92000, and this is falsé.' SCREAMING_SNAKE_CASE__ : str = tokenizer.tokenize(a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = rust_tokenizer.tokenize(a_ ) self.assertListEqual(a_ , a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.encode(a_ , add_special_tokens=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = rust_tokenizer.encode(a_ , add_special_tokens=a_ ) self.assertListEqual(a_ , a_ ) SCREAMING_SNAKE_CASE__ : int = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.encode(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = rust_tokenizer.encode(a_ ) self.assertListEqual(a_ , a_ ) @slow def __lowercase( self : List[str] )-> Dict: """simple docstring""" # fmt: off SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. SCREAMING_SNAKE_CASE__ : str = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=a_ , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=a_ , )
85
0
'''simple docstring''' from collections import OrderedDict from typing import Any, Mapping, Optional, Union from ...configuration_utils import PretrainedConfig from ...feature_extraction_utils import FeatureExtractionMixin from ...onnx import OnnxConfig from ...onnx.utils import compute_effective_axis_dimension from ...tokenization_utils_base import PreTrainedTokenizerBase from ...utils import TensorType, logging A__ : Dict = logging.get_logger(__name__) A__ : List[Any] = { "deepmind/language-perceiver": "https://huggingface.co/deepmind/language-perceiver/resolve/main/config.json", # See all Perceiver models at https://huggingface.co/models?filter=perceiver } class snake_case__ ( UpperCamelCase_ ): A__ = '''perceiver''' def __init__( self : List[Any] , __a : List[Any]=256 , __a : List[Any]=1280 , __a : str=768 , __a : List[Any]=1 , __a : Tuple=26 , __a : Optional[Any]=8 , __a : str=8 , __a : int=None , __a : Dict=None , __a : Dict="kv" , __a : List[Any]=1 , __a : Dict=1 , __a : Dict="gelu" , __a : Dict=0.1 , __a : Optional[Any]=0.0_2 , __a : Dict=1e-12 , __a : Union[str, Any]=True , __a : Optional[int]=262 , __a : str=2048 , __a : List[str]=56 , __a : Dict=[368, 496] , __a : int=16 , __a : str=1920 , __a : List[str]=16 , __a : List[str]=[1, 16, 224, 224] , **__a : Union[str, Any] , ) -> Tuple: '''simple docstring''' super().__init__(**a_ ) __snake_case : Optional[int] = num_latents __snake_case : Dict = d_latents __snake_case : Optional[Any] = d_model __snake_case : str = num_blocks __snake_case : Tuple = num_self_attends_per_block __snake_case : List[str] = num_self_attention_heads __snake_case : Tuple = num_cross_attention_heads __snake_case : Any = qk_channels __snake_case : Tuple = v_channels __snake_case : Any = cross_attention_shape_for_attention __snake_case : List[str] = self_attention_widening_factor __snake_case : Dict = cross_attention_widening_factor __snake_case : Optional[Any] = hidden_act __snake_case : List[str] = attention_probs_dropout_prob __snake_case : Optional[Any] = initializer_range __snake_case : Union[str, Any] = layer_norm_eps __snake_case : Tuple = use_query_residual # masked language modeling attributes __snake_case : Optional[Any] = vocab_size __snake_case : str = max_position_embeddings # image classification attributes __snake_case : List[Any] = image_size # flow attributes __snake_case : Dict = train_size # multimodal autoencoding attributes __snake_case : Tuple = num_frames __snake_case : int = audio_samples_per_frame __snake_case : Union[str, Any] = samples_per_patch __snake_case : Optional[int] = output_shape class snake_case__ ( UpperCamelCase_ ): @property def A_ ( self : str ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' if self.task == "multiple-choice": __snake_case : Optional[int] = {0: 'batch', 1: 'choice', 2: 'sequence'} else: __snake_case : List[str] = {0: 'batch', 1: 'sequence'} return OrderedDict( [ ('inputs', dynamic_axis), ('attention_mask', dynamic_axis), ] ) @property def A_ ( self : Tuple ) -> float: '''simple docstring''' return 1e-4 def A_ ( self : List[str] , __a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __a : int = -1 , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , __a : int = 3 , __a : int = 40 , __a : int = 40 , ) -> Mapping[str, Any]: '''simple docstring''' # copied from `transformers.onnx.config.OnnxConfig` and slightly altered/simplified if isinstance(a_ , a_ ): # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __snake_case : List[Any] = compute_effective_axis_dimension( a_ , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 ) # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX __snake_case : Any = preprocessor.num_special_tokens_to_add(a_ ) __snake_case : str = compute_effective_axis_dimension( a_ , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=a_ ) # Generate dummy inputs according to compute batch and sequence __snake_case : Union[str, Any] = [' '.join(['a'] ) * seq_length] * batch_size __snake_case : Tuple = dict(preprocessor(a_ , return_tensors=a_ ) ) __snake_case : Dict = inputs.pop('input_ids' ) return inputs elif isinstance(a_ , a_ ) and preprocessor.model_input_names[0] == "pixel_values": # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX __snake_case : Dict = compute_effective_axis_dimension(a_ , fixed_dimension=OnnxConfig.default_fixed_batch ) __snake_case : Union[str, Any] = self._generate_dummy_images(a_ , a_ , a_ , a_ ) __snake_case : Tuple = dict(preprocessor(images=a_ , return_tensors=a_ ) ) __snake_case : str = inputs.pop('pixel_values' ) return inputs else: raise ValueError( 'Unable to generate dummy inputs for the model. Please provide a tokenizer or a preprocessor.' )
286
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable SCREAMING_SNAKE_CASE__ : Any = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[str] = ["DPTFeatureExtractor"] SCREAMING_SNAKE_CASE__ : Tuple = ["DPTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[Any] = [ "DPT_PRETRAINED_MODEL_ARCHIVE_LIST", "DPTForDepthEstimation", "DPTForSemanticSegmentation", "DPTModel", "DPTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
0
'''simple docstring''' from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) __A : Optional[int] = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : Union[str, Any] = ["EncoderDecoderModel"] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : str = ["TFEncoderDecoderModel"] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __A : List[str] = ["FlaxEncoderDecoderModel"] if TYPE_CHECKING: from .configuration_encoder_decoder import EncoderDecoderConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_encoder_decoder import EncoderDecoderModel try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_encoder_decoder import TFEncoderDecoderModel try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel else: import sys __A : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
275
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__) class snake_case ( UpperCamelCase_ ): lowercase_ = ['pixel_values'] def __init__( self : List[Any] , a_ : bool = True , a_ : Union[int, float] = 1 / 255 , a_ : bool = True , a_ : int = 8 , **a_ : Union[str, Any] , )-> None: """simple docstring""" super().__init__(**a_ ) SCREAMING_SNAKE_CASE__ : List[str] = do_rescale SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_factor SCREAMING_SNAKE_CASE__ : Dict = do_pad SCREAMING_SNAKE_CASE__ : Any = pad_size def __lowercase( self : str , a_ : np.ndarray , a_ : float , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : str )-> np.ndarray: """simple docstring""" return rescale(a_ , scale=a_ , data_format=a_ , **a_ ) def __lowercase( self : Any , a_ : np.ndarray , a_ : int , a_ : Optional[Union[str, ChannelDimension]] = None )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = get_image_size(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = (old_height // size + 1) * size - old_height SCREAMING_SNAKE_CASE__ : List[Any] = (old_width // size + 1) * size - old_width return pad(a_ , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=a_ ) def __lowercase( self : Tuple , a_ : ImageInput , a_ : Optional[bool] = None , a_ : Optional[float] = None , a_ : Optional[bool] = None , a_ : Optional[int] = None , a_ : Optional[Union[str, TensorType]] = None , a_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a_ : Dict , )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE__ : List[str] = do_pad if do_pad is not None else self.do_pad SCREAMING_SNAKE_CASE__ : List[str] = pad_size if pad_size is not None else self.pad_size SCREAMING_SNAKE_CASE__ : Tuple = make_list_of_images(a_ ) if not valid_images(a_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE__ : List[str] = [to_numpy_array(a_ ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.rescale(image=a_ , scale=a_ ) for image in images] if do_pad: SCREAMING_SNAKE_CASE__ : str = [self.pad(a_ , size=a_ ) for image in images] SCREAMING_SNAKE_CASE__ : List[str] = [to_channel_dimension_format(a_ , a_ ) for image in images] SCREAMING_SNAKE_CASE__ : Tuple = {'pixel_values': images} return BatchFeature(data=a_ , tensor_type=a_ )
85
0
from ...configuration_utils import PretrainedConfig from ...utils import logging lowercase : int = logging.get_logger(__name__) lowercase : Any = { "studio-ousia/luke-base": "https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json", "studio-ousia/luke-large": "https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json", } class __snake_case ( UpperCamelCase_ ): _a : Any= "luke" def __init__( self ,snake_case=50267 ,snake_case=500000 ,snake_case=768 ,snake_case=256 ,snake_case=12 ,snake_case=12 ,snake_case=3072 ,snake_case="gelu" ,snake_case=0.1 ,snake_case=0.1 ,snake_case=512 ,snake_case=2 ,snake_case=0.02 ,snake_case=1e-12 ,snake_case=True ,snake_case=None ,snake_case=1 ,snake_case=0 ,snake_case=2 ,**snake_case ,): '''simple docstring''' super().__init__(pad_token_id=a_ ,bos_token_id=a_ ,eos_token_id=a_ ,**a_ ) lowercase : Tuple = vocab_size lowercase : List[Any] = entity_vocab_size lowercase : List[str] = hidden_size lowercase : List[str] = entity_emb_size lowercase : Any = num_hidden_layers lowercase : str = num_attention_heads lowercase : str = hidden_act lowercase : List[Any] = intermediate_size lowercase : Union[str, Any] = hidden_dropout_prob lowercase : str = attention_probs_dropout_prob lowercase : int = max_position_embeddings lowercase : List[str] = type_vocab_size lowercase : Any = initializer_range lowercase : Optional[Any] = layer_norm_eps lowercase : Tuple = use_entity_aware_attention lowercase : List[str] = classifier_dropout
336
from pathlib import Path import numpy as np from PIL import Image def _a ( lowercase__ : np.ndarray ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 0.1140 * b def _a ( lowercase__ : np.ndarray ): '''simple docstring''' return (gray > 1_27) & (gray <= 2_55) def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = np.zeros_like(lowercase__ ) SCREAMING_SNAKE_CASE__ : str = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image SCREAMING_SNAKE_CASE__ : Optional[Any] = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): SCREAMING_SNAKE_CASE__ : List[Any] = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() SCREAMING_SNAKE_CASE__ : List[str] = int(summation > 0 ) return output if __name__ == "__main__": # read original image SCREAMING_SNAKE_CASE__ : int = Path(__file__).resolve().parent / "image_data" / "lena.jpg" SCREAMING_SNAKE_CASE__ : int = np.array(Image.open(lena_path)) # kernel to be applied SCREAMING_SNAKE_CASE__ : str = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) SCREAMING_SNAKE_CASE__ : Optional[int] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image SCREAMING_SNAKE_CASE__ : Optional[int] = Image.fromarray(output).convert("RGB") pil_img.save("result_dilation.png")
85
0
"""simple docstring""" import argparse import dataclasses import json import logging import os import shutil from typing import List, Optional import datasets from accelerate import Accelerator from datasets import load_dataset from finetuning import finetune from tqdm.auto import tqdm import transformers from transformers import AutoConfig, set_seed from transformers.trainer_utils import IntervalStrategy _A = logging.getLogger(__name__) _A = "pytorch_model.bin" @dataclasses.dataclass class __UpperCAmelCase : """simple docstring""" _snake_case : Optional[Any] = dataclasses.field( metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} ) _snake_case : List[Any] = dataclasses.field( default=UpperCamelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , ) @dataclasses.dataclass class __UpperCAmelCase : """simple docstring""" _snake_case : Any = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} ) _snake_case : List[Any] = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} ) _snake_case : Optional[int] = dataclasses.field( default=UpperCamelCase_ , metadata={'help': 'A csv or a json file containing the validation data.'} ) _snake_case : Optional[Any] = dataclasses.field( default=UpperCamelCase_ , metadata={'help': 'The name of the task to train on.'} , ) _snake_case : Optional[Any] = dataclasses.field( default=UpperCamelCase_ , metadata={'help': 'The list of labels for the task.'} ) @dataclasses.dataclass class __UpperCAmelCase : """simple docstring""" _snake_case : Any = dataclasses.field( metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} ) _snake_case : int = dataclasses.field( default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} ) _snake_case : int = dataclasses.field( default='no' , metadata={ 'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch]' } , ) _snake_case : List[Any] = dataclasses.field( default=1_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , ) _snake_case : Union[str, Any] = dataclasses.field( default=0.0 , metadata={ 'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.' } , ) _snake_case : str = dataclasses.field( default=UpperCamelCase_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , ) _snake_case : List[Any] = dataclasses.field( default=UpperCamelCase_ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , ) _snake_case : List[str] = dataclasses.field( default=UpperCamelCase_ , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , ) _snake_case : int = dataclasses.field( default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , ) _snake_case : Any = dataclasses.field( default=1_0_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , ) _snake_case : Dict = dataclasses.field( default=UpperCamelCase_ , metadata={'help': 'Random seed for initialization.'} , ) def lowercase (_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ,_snake_case ) -> List[Any]: '''simple docstring''' __UpperCamelCase = datasets.concatenate_datasets([infer_input, infer_output] ,axis=1 ) if args.do_filter_by_confidence: __UpperCamelCase = dataset.filter(lambda _snake_case : example["probability"] > args.confidence_threshold ) if args.do_filter_by_val_performance: assert eval_result >= 0.0 and eval_result <= 1.0 __UpperCamelCase = int(eval_result * len(lowercase__ ) ) print(lowercase__ ) __UpperCamelCase = dataset.sort("probability" ,reverse=lowercase__ ) __UpperCamelCase = dataset.select(range(lowercase__ ) ) __UpperCamelCase = dataset.remove_columns(["label", "probability"] ) __UpperCamelCase = dataset.rename_column("prediction" ,"label" ) __UpperCamelCase = dataset.map(lambda _snake_case : {"label": idalabel[example["label"]]} ) __UpperCamelCase = dataset.shuffle(seed=args.seed ) __UpperCamelCase = os.path.join(lowercase__ ,f"""train_pseudo.{args.data_file_extension}""" ) if args.data_file_extension == "csv": dataset.to_csv(lowercase__ ,index=lowercase__ ) else: dataset.to_json(lowercase__ ) def lowercase (_snake_case ,_snake_case ,_snake_case ,_snake_case ,**_snake_case ) -> Any: '''simple docstring''' __UpperCamelCase = Accelerator() # Make one log on every process with the configuration for debugging. logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" ,datefmt="%m/%d/%Y %H:%M:%S" ,level=logging.INFO ,) logger.info(accelerator.state ) # Setup logging, we only want one process per machine to log things on the # screen. accelerator.is_local_main_process is only True for one process per # machine. logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR ) if accelerator.is_local_main_process: datasets.utils.logging.set_verbosity_warning() transformers.utils.logging.set_verbosity_info() else: datasets.utils.logging.set_verbosity_error() transformers.utils.logging.set_verbosity_error() __UpperCamelCase = STModelArguments(model_name_or_path=lowercase__ ) __UpperCamelCase = STDataArguments(train_file=lowercase__ ,infer_file=lowercase__ ) __UpperCamelCase = STTrainingArguments(output_dir=lowercase__ ) __UpperCamelCase = argparse.Namespace() for arg_class in (model_args, data_args, training_args): for key, value in vars(lowercase__ ).items(): setattr(lowercase__ ,lowercase__ ,lowercase__ ) for key, value in kwargs.items(): if hasattr(lowercase__ ,lowercase__ ): setattr(lowercase__ ,lowercase__ ,lowercase__ ) # Sanity checks __UpperCamelCase = {} __UpperCamelCase = None # You need to provide the training data and the data to predict on assert args.train_file is not None assert args.infer_file is not None __UpperCamelCase = args.train_file __UpperCamelCase = args.infer_file if args.evaluation_strategy != IntervalStrategy.NO.value: assert args.eval_file is not None __UpperCamelCase = args.eval_file for key in data_files: __UpperCamelCase = data_files[key].split("." )[-1] assert extension in ["csv", "json"], f"""`{key}_file` should be a csv or a json file.""" if args.data_file_extension is None: __UpperCamelCase = extension else: assert extension == args.data_file_extension, f"""`{key}_file` should be a {args.data_file_extension} file`.""" assert ( args.eval_metric in datasets.list_metrics() ), f"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}.""" # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed ) logger.info("Creating the initial data directory for self-training..." ) __UpperCamelCase = f"""{args.output_dir}/self-train_iter-{{}}""".format __UpperCamelCase = data_dir_format(0 ) if accelerator.is_main_process: if args.output_dir is not None: os.makedirs(args.output_dir ,exist_ok=lowercase__ ) os.makedirs(lowercase__ ,exist_ok=lowercase__ ) accelerator.wait_for_everyone() __UpperCamelCase = None __UpperCamelCase = None __UpperCamelCase = 0 __UpperCamelCase = False # Show the progress bar __UpperCamelCase = tqdm(range(args.max_selftrain_iterations ) ,disable=not accelerator.is_local_main_process ) # Self-train for iteration in range(0 ,int(args.max_selftrain_iterations ) ): __UpperCamelCase = data_dir_format(lowercase__ ) assert os.path.exists(lowercase__ ) # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for # iteration > 0 __UpperCamelCase = os.path.join(lowercase__ ,"stage-1" ) __UpperCamelCase = { 'accelerator': accelerator, 'model_name_or_path': args.model_name_or_path, 'cache_dir': args.cache_dir, 'do_train': True, 'train_file': data_files['train'] if iteration == 0 else data_files['train_pseudo'], 'do_eval': True if args.eval_file is not None else False, 'eval_file': data_files['eval'], 'do_predict': True, 'infer_file': data_files['infer'], 'task_name': args.task_name, 'label_list': args.label_list, 'output_dir': current_output_dir, 'eval_metric': args.eval_metric, 'evaluation_strategy': args.evaluation_strategy, 'early_stopping_patience': args.early_stopping_patience, 'early_stopping_threshold': args.early_stopping_threshold, 'seed': args.seed, } # Add additional training arguments for key, value in kwargs.items(): if key not in arguments_dict and not hasattr(lowercase__ ,lowercase__ ): arguments_dict.update({key: value} ) __UpperCamelCase = os.path.join(lowercase__ ,"best-checkpoint" ,lowercase__ ) if os.path.exists(lowercase__ ): logger.info( "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1." ,lowercase__ ,lowercase__ ,) else: logger.info("***** Running self-training: iteration: %d, stage: 1 *****" ,lowercase__ ) finetune(**lowercase__ ) accelerator.wait_for_everyone() assert os.path.exists(lowercase__ ) logger.info("Self-training job completed: iteration: %d, stage: 1." ,lowercase__ ) if iteration > 0 and args.finetune_on_labeled_data: # Stage 2 (optional): fine-tuning on the original labeled data __UpperCamelCase = os.path.join(lowercase__ ,"best-checkpoint" ) __UpperCamelCase = os.path.join(lowercase__ ,"stage-2" ) # Update arguments_dict __UpperCamelCase = model_path __UpperCamelCase = data_files['train'] __UpperCamelCase = current_output_dir __UpperCamelCase = os.path.join(lowercase__ ,"best-checkpoint" ,lowercase__ ) if os.path.exists(lowercase__ ): logger.info( "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2." ,lowercase__ ,lowercase__ ,) else: logger.info("***** Running self-training: iteration: %d, stage: 2 *****" ,lowercase__ ) finetune(**lowercase__ ) accelerator.wait_for_everyone() assert os.path.exists(lowercase__ ) logger.info("Self-training job completed: iteration: %d, stage: 2." ,lowercase__ ) __UpperCamelCase = iteration __UpperCamelCase = data_dir_format(iteration + 1 ) __UpperCamelCase = AutoConfig.from_pretrained(os.path.join(lowercase__ ,"best-checkpoint" ) ) __UpperCamelCase = config.idalabel __UpperCamelCase = os.path.join(lowercase__ ,"eval_results_best-checkpoint.json" ) __UpperCamelCase = os.path.join(lowercase__ ,"test_results_best-checkpoint.json" ) assert os.path.exists(lowercase__ ) with open(lowercase__ ,"r" ) as f: __UpperCamelCase = float(json.load(lowercase__ )[args.eval_metric] ) __UpperCamelCase = os.path.join(lowercase__ ,"infer_output_best-checkpoint.csv" ) assert os.path.exists(lowercase__ ) # Loading the dataset from local csv or json files. __UpperCamelCase = load_dataset(args.data_file_extension ,data_files={"data": data_files["infer"]} )['data'] __UpperCamelCase = load_dataset("csv" ,data_files={"data": infer_output_file} )['data'] if accelerator.is_main_process: os.makedirs(lowercase__ ,exist_ok=lowercase__ ) shutil.copy(lowercase__ ,os.path.join(lowercase__ ,f"""eval_results_iter-{iteration}.json""" ) ) if os.path.exists(lowercase__ ): shutil.copy(lowercase__ ,os.path.join(lowercase__ ,f"""test_results_iter-{iteration}.json""" ) ) create_pseudo_labeled_data(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ) accelerator.wait_for_everyone() __UpperCamelCase = os.path.join(lowercase__ ,f"""train_pseudo.{args.data_file_extension}""" ) if args.evaluation_strategy != IntervalStrategy.NO.value: __UpperCamelCase = eval_result if best_iteration is None: __UpperCamelCase = new_iteration __UpperCamelCase = new_eval_result else: if new_eval_result - best_eval_result > args.early_stopping_threshold: __UpperCamelCase = new_iteration __UpperCamelCase = new_eval_result __UpperCamelCase = 0 else: if new_eval_result == best_eval_result: __UpperCamelCase = new_iteration __UpperCamelCase = new_eval_result early_stopping_patience_counter += 1 if early_stopping_patience_counter >= args.early_stopping_patience: __UpperCamelCase = True progress_bar.update(1 ) if should_training_stop: break if best_iteration is not None: # Save the best iteration logger.info("Best iteration: %d" ,lowercase__ ) logger.info("Best evaluation result: %s = %f" ,args.eval_metric ,lowercase__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(lowercase__ ,f"""eval_results_iter-{iteration}.json""" ) ,os.path.join(lowercase__ ,"eval_results_best-iteration.json" ) ,) else: # Assume that the last iteration is the best logger.info("Best iteration: %d" ,args.max_selftrain_iterations - 1 ) logger.info("Best evaluation result: %s = %f" ,args.eval_metric ,lowercase__ ) accelerator.wait_for_everyone() if accelerator.is_main_process: shutil.copy( os.path.join(lowercase__ ,f"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) ,os.path.join(lowercase__ ,"eval_results_best-iteration.json" ) ,)
505
def _a ( lowercase__ : int = 60_08_51_47_51_43 ): '''simple docstring''' try: SCREAMING_SNAKE_CASE__ : Dict = int(lowercase__ ) except (TypeError, ValueError): raise TypeError('Parameter n must be int or castable to int.' ) if n <= 0: raise ValueError('Parameter n must be greater than or equal to one.' ) SCREAMING_SNAKE_CASE__ : int = 2 SCREAMING_SNAKE_CASE__ : int = 0 if n == 2: return 2 while n > 2: while n % i != 0: i += 1 SCREAMING_SNAKE_CASE__ : str = i while n % i == 0: SCREAMING_SNAKE_CASE__ : List[Any] = n // i i += 1 return int(lowercase__ ) if __name__ == "__main__": print(F"""{solution() = }""")
85
0
import argparse import torch from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert from transformers.utils import logging logging.set_verbosity_info() def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ,lowerCAmelCase__ ,lowerCAmelCase__ ): lowerCamelCase_ : Optional[Any] = LxmertConfig.from_json_file(lowercase__ ) print(F"Building PyTorch model from configuration: {config}" ) lowerCamelCase_ : Any = LxmertForPreTraining(lowercase__ ) # Load weights from tf checkpoint load_tf_weights_in_lxmert(lowercase__ ,lowercase__ ,lowercase__ ) # Save pytorch-model print(F"Save PyTorch model to {pytorch_dump_path}" ) torch.save(model.state_dict() ,lowercase__ ) if __name__ == "__main__": _lowercase : Union[str, Any] =argparse.ArgumentParser() # Required parameters parser.add_argument( """--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""" ) parser.add_argument( """--config_file""", default=None, type=str, required=True, help="""The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.""", ) parser.add_argument( """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) _lowercase : str =parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
364
def _a ( lowercase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = int(lowercase__ ) if n_element < 1: SCREAMING_SNAKE_CASE__ : Tuple = ValueError('a should be a positive number' ) raise my_error SCREAMING_SNAKE_CASE__ : Any = [1] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = (0, 0, 0) SCREAMING_SNAKE_CASE__ : Any = 1 while index < n_element: while hamming_list[i] * 2 <= hamming_list[-1]: i += 1 while hamming_list[j] * 3 <= hamming_list[-1]: j += 1 while hamming_list[k] * 5 <= hamming_list[-1]: k += 1 hamming_list.append( min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) ) index += 1 return hamming_list if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Any = input("Enter the last number (nth term) of the Hamming Number Series: ") print("Formula of Hamming Number Series => 2^i * 3^j * 5^k") SCREAMING_SNAKE_CASE__ : int = hamming(int(n)) print("-----------------------------------------------------") print(F"""The list with nth numbers is: {hamming_numbers}""") print("-----------------------------------------------------")
85
0
'''simple docstring''' from scipy.stats import spearmanr import datasets lowercase : List[str] = "\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n" lowercase : Optional[Any] = "\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {'spearmanr': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric(\"spearmanr\")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results['spearmanr'])\n -0.7\n >>> print(round(results['spearmanr_pvalue'], 2))\n 0.19\n" lowercase : Optional[Any] = r"\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION ) class _a (datasets.Metric ): '''simple docstring''' def snake_case_ ( self ) -> str: return datasets.MetricInfo( description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features( { """predictions""": datasets.Value("""float""" ), """references""": datasets.Value("""float""" ), } ) ,reference_urls=["""https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html"""] ,) def snake_case_ ( self ,__a ,__a ,__a=False ) -> int: snake_case : List[Any] = spearmanr(a_ ,a_ ) if return_pvalue: return {"spearmanr": results[0], "spearmanr_pvalue": results[1]} else: return {"spearmanr": results[0]}
116
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ : Union[str, Any] = { "configuration_nllb_moe": [ "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP", "NllbMoeConfig", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : str = [ "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST", "NllbMoeForConditionalGeneration", "NllbMoeModel", "NllbMoePreTrainedModel", "NllbMoeTop2Router", "NllbMoeSparseMLP", ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
0
from collections.abc import Sequence from queue import Queue class SCREAMING_SNAKE_CASE__ : def __init__( self : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : str , __lowerCamelCase : Union[str, Any] , __lowerCamelCase : Optional[Any]=None , __lowerCamelCase : List[str]=None ): """simple docstring""" lowerCAmelCase__ = start lowerCAmelCase__ = end lowerCAmelCase__ = val lowerCAmelCase__ = (start + end) // 2 lowerCAmelCase__ = left lowerCAmelCase__ = right def __repr__( self : List[Any] ): """simple docstring""" return F"""SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})""" class SCREAMING_SNAKE_CASE__ : def __init__( self : Tuple , __lowerCamelCase : Sequence , __lowerCamelCase : Dict ): """simple docstring""" lowerCAmelCase__ = collection lowerCAmelCase__ = function if self.collection: lowerCAmelCase__ = self._build_tree(0 , len(a_ ) - 1 ) def A__ ( self : Union[str, Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] ): """simple docstring""" self._update_tree(self.root , a_ , a_ ) def A__ ( self : Tuple , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[Any] ): """simple docstring""" return self._query_range(self.root , a_ , a_ ) def A__ ( self : int , __lowerCamelCase : str , __lowerCamelCase : List[str] ): """simple docstring""" if start == end: return SegmentTreeNode(a_ , a_ , self.collection[start] ) lowerCAmelCase__ = (start + end) // 2 lowerCAmelCase__ = self._build_tree(a_ , a_ ) lowerCAmelCase__ = self._build_tree(mid + 1 , a_ ) return SegmentTreeNode(a_ , a_ , self.fn(left.val , right.val ) , a_ , a_ ) def A__ ( self : str , __lowerCamelCase : str , __lowerCamelCase : List[Any] , __lowerCamelCase : str ): """simple docstring""" if node.start == i and node.end == i: lowerCAmelCase__ = val return if i <= node.mid: self._update_tree(node.left , a_ , a_ ) else: self._update_tree(node.right , a_ , a_ ) lowerCAmelCase__ = self.fn(node.left.val , node.right.val ) def A__ ( self : Any , __lowerCamelCase : Optional[int] , __lowerCamelCase : Tuple , __lowerCamelCase : Optional[Any] ): """simple docstring""" if node.start == i and node.end == j: return node.val if i <= node.mid: if j <= node.mid: # range in left child tree return self._query_range(node.left , a_ , a_ ) else: # range in left child tree and right child tree return self.fn( self._query_range(node.left , a_ , node.mid ) , self._query_range(node.right , node.mid + 1 , a_ ) , ) else: # range in right child tree return self._query_range(node.right , a_ , a_ ) def A__ ( self : List[Any] ): """simple docstring""" if self.root is not None: lowerCAmelCase__ = Queue() queue.put(self.root ) while not queue.empty(): lowerCAmelCase__ = queue.get() yield node if node.left is not None: queue.put(node.left ) if node.right is not None: queue.put(node.right ) if __name__ == "__main__": import operator for fn in [operator.add, max, min]: print("""*""" * 50) __magic_name__ : Any = SegmentTree([2, 1, 5, 3, 4], fn) for node in arr.traverse(): print(node) print() arr.update(1, 5) for node in arr.traverse(): print(node) print() print(arr.query_range(3, 4)) # 7 print(arr.query_range(2, 2)) # 5 print(arr.query_range(1, 3)) # 13 print()
615
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) SCREAMING_SNAKE_CASE__ : List[str] = { "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[int] = [ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys SCREAMING_SNAKE_CASE__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
0
"""simple docstring""" import os def _snake_case ( _snake_case : str = "input.txt" ) -> int: '''simple docstring''' with open(os.path.join(os.path.dirname(lowercase__ ) , lowercase__ ) ) as input_file: _A = [ [int(lowercase__ ) for element in line.split(',' )] for line in input_file.readlines() ] _A = len(lowercase__ ) _A = len(matrix[0] ) _A = [[-1 for _ in range(lowercase__ )] for _ in range(lowercase__ )] for i in range(lowercase__ ): _A = matrix[i][0] for j in range(1 , lowercase__ ): for i in range(lowercase__ ): _A = minimal_path_sums[i][j - 1] + matrix[i][j] for i in range(1 , lowercase__ ): _A = min( minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] ) for i in range(rows - 2 , -1 , -1 ): _A = min( minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] ) return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums ) if __name__ == "__main__": print(F'''{solution() = }''')
7
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs SCREAMING_SNAKE_CASE__ : int = imread(r"digital_image_processing/image_data/lena_small.jpg") SCREAMING_SNAKE_CASE__ : List[Any] = cvtColor(img, COLOR_BGR2GRAY) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = cn.convert_to_negative(lowercase__ ) # assert negative_img array for at least one True assert negative_img.any() def _a ( ): '''simple docstring''' with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img: # Work around assertion for response assert str(cc.change_contrast(lowercase__ , 1_10 ) ).startswith( '<PIL.Image.Image image mode=RGB size=100x100 at' ) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = imread('digital_image_processing/image_data/lena_small.jpg' , 0 ) # assert ambiguous array for all == True assert canny_img.all() SCREAMING_SNAKE_CASE__ : List[str] = canny.canny(lowercase__ ) # assert canny array for at least one True assert canny_array.any() def _a ( ): '''simple docstring''' assert gg.gaussian_filter(lowercase__ , 5 , sigma=0.9 ).all() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) SCREAMING_SNAKE_CASE__ : Tuple = conv.img_convolve(lowercase__ , lowercase__ ).astype(lowercase__ ) assert res.any() def _a ( ): '''simple docstring''' assert med.median_filter(lowercase__ , 3 ).any() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = sob.sobel_filter(lowercase__ ) assert grad.any() and theta.any() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = sp.make_sepia(lowercase__ , 20 ) assert sepia.all() def _a ( lowercase__ : str = "digital_image_processing/image_data/lena_small.jpg" ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = bs.Burkes(imread(lowercase__ , 1 ) , 1_20 ) burkes.process() assert burkes.output_img.any() def _a ( lowercase__ : str = "digital_image_processing/image_data/lena_small.jpg" , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = rs.NearestNeighbour(imread(lowercase__ , 1 ) , 4_00 , 2_00 ) nn.process() assert nn.output.any() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = 'digital_image_processing/image_data/lena.jpg' # Reading the image and converting it to grayscale. SCREAMING_SNAKE_CASE__ : Dict = imread(lowercase__ , 0 ) # Test for get_neighbors_pixel function() return not None SCREAMING_SNAKE_CASE__ : str = 0 SCREAMING_SNAKE_CASE__ : Dict = 0 SCREAMING_SNAKE_CASE__ : Any = image[x_coordinate][y_coordinate] SCREAMING_SNAKE_CASE__ : List[Any] = lbp.get_neighbors_pixel( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image SCREAMING_SNAKE_CASE__ : Optional[Any] = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): SCREAMING_SNAKE_CASE__ : str = lbp.local_binary_value(lowercase__ , lowercase__ , lowercase__ ) assert lbp_image.any()
85
0
from dataclasses import dataclass from typing import Optional, Tuple, Union import numpy as np import torch from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput, randn_tensor from .scheduling_utils import SchedulerMixin @dataclass class lowerCamelCase (UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ = 42 UpperCAmelCase_ = 42 UpperCAmelCase_ = None class lowerCamelCase (UpperCamelCase_ , UpperCamelCase_ ): """simple docstring""" UpperCAmelCase_ = 2 @register_to_config def __init__( self : Optional[Any], _UpperCAmelCase : float = 0.02, _UpperCAmelCase : float = 1_0_0, _UpperCAmelCase : float = 1.007, _UpperCAmelCase : float = 8_0, _UpperCAmelCase : float = 0.05, _UpperCAmelCase : float = 5_0, ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = sigma_max # setable values SCREAMING_SNAKE_CASE__ : int = None SCREAMING_SNAKE_CASE__ : np.IntTensor = None SCREAMING_SNAKE_CASE__ : torch.FloatTensor = None # sigma(t_i) def A_ ( self : Optional[Any], _UpperCAmelCase : torch.FloatTensor, _UpperCAmelCase : Optional[int] = None ) -> torch.FloatTensor: """simple docstring""" return sample def A_ ( self : Any, _UpperCAmelCase : int, _UpperCAmelCase : Union[str, torch.device] = None ) -> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = num_inference_steps SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.arange(0, self.num_inference_steps )[::-1].copy() SCREAMING_SNAKE_CASE__ : Any = torch.from_numpy(a_ ).to(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [ ( self.config.sigma_max**2 * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) ) for i in self.timesteps ] SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(a_, dtype=torch.floataa, device=a_ ) def A_ ( self : Tuple, _UpperCAmelCase : torch.FloatTensor, _UpperCAmelCase : float, _UpperCAmelCase : Optional[torch.Generator] = None ) -> Tuple[torch.FloatTensor, float]: """simple docstring""" if self.config.s_min <= sigma <= self.config.s_max: SCREAMING_SNAKE_CASE__ : Optional[Any] = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1 ) else: SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0 # sample eps ~ N(0, S_noise^2 * I) SCREAMING_SNAKE_CASE__ : Any = self.config.s_noise * randn_tensor(sample.shape, generator=a_ ).to(sample.device ) SCREAMING_SNAKE_CASE__ : List[str] = sigma + gamma * sigma SCREAMING_SNAKE_CASE__ : Union[str, Any] = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) return sample_hat, sigma_hat def A_ ( self : Dict, _UpperCAmelCase : torch.FloatTensor, _UpperCAmelCase : float, _UpperCAmelCase : float, _UpperCAmelCase : torch.FloatTensor, _UpperCAmelCase : bool = True, ) -> Union[KarrasVeOutput, Tuple]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = sample_hat + sigma_hat * model_output SCREAMING_SNAKE_CASE__ : Dict = (sample_hat - pred_original_sample) / sigma_hat SCREAMING_SNAKE_CASE__ : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * derivative if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=a_, derivative=a_, pred_original_sample=a_ ) def A_ ( self : Optional[int], _UpperCAmelCase : torch.FloatTensor, _UpperCAmelCase : float, _UpperCAmelCase : float, _UpperCAmelCase : torch.FloatTensor, _UpperCAmelCase : torch.FloatTensor, _UpperCAmelCase : torch.FloatTensor, _UpperCAmelCase : bool = True, ) -> Union[KarrasVeOutput, Tuple]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = sample_prev + sigma_prev * model_output SCREAMING_SNAKE_CASE__ : Dict = (sample_prev - pred_original_sample) / sigma_prev SCREAMING_SNAKE_CASE__ : Optional[int] = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) if not return_dict: return (sample_prev, derivative) return KarrasVeOutput( prev_sample=a_, derivative=a_, pred_original_sample=a_ ) def A_ ( self : Tuple, _UpperCAmelCase : int, _UpperCAmelCase : Tuple, _UpperCAmelCase : Union[str, Any] ) -> Optional[int]: """simple docstring""" raise NotImplementedError()
663
import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu SCREAMING_SNAKE_CASE__ : Any = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json" with io.open(filename, "r", encoding="utf-8") as f: SCREAMING_SNAKE_CASE__ : Tuple = json.load(f) @require_torch class snake_case ( unittest.TestCase ): def __lowercase( self : List[str] , a_ : Any )-> str: """simple docstring""" return FSMTTokenizer.from_pretrained(a_ ) def __lowercase( self : int , a_ : Union[str, Any] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = FSMTForConditionalGeneration.from_pretrained(a_ ).to(a_ ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ['en-ru', 26.0], ['ru-en', 22.0], ['en-de', 22.0], ['de-en', 29.0], ] ) @slow def __lowercase( self : int , a_ : Optional[int] , a_ : str )-> List[str]: """simple docstring""" # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality SCREAMING_SNAKE_CASE__ : Any = F'''facebook/wmt19-{pair}''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_tokenizer(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_model(a_ ) SCREAMING_SNAKE_CASE__ : int = bleu_data[pair]['src'] SCREAMING_SNAKE_CASE__ : Optional[int] = bleu_data[pair]['tgt'] SCREAMING_SNAKE_CASE__ : Any = tokenizer(a_ , return_tensors='pt' , truncation=a_ , padding='longest' ).to(a_ ) SCREAMING_SNAKE_CASE__ : int = model.generate( input_ids=batch.input_ids , num_beams=8 , ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.batch_decode( a_ , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = calculate_bleu(a_ , a_ ) print(a_ ) self.assertGreaterEqual(scores['bleu'] , a_ )
85
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class a ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE : Optional[int] = StableDiffusionInstructPixaPixPipeline SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"""height""", """width""", """cross_attention_kwargs"""} SCREAMING_SNAKE_CASE : Optional[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS SCREAMING_SNAKE_CASE : Dict = IMAGE_TO_IMAGE_IMAGE_PARAMS SCREAMING_SNAKE_CASE : List[str] = IMAGE_TO_IMAGE_IMAGE_PARAMS def UpperCamelCase ( self : str ) -> int: torch.manual_seed(0 ) lowerCamelCase_ = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) lowerCamelCase_ = PNDMScheduler(skip_prk_steps=a_ ) torch.manual_seed(0 ) lowerCamelCase_ = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) lowerCamelCase_ = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) lowerCamelCase_ = CLIPTextModel(a_ ) lowerCamelCase_ = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) lowerCamelCase_ = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def UpperCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[Any]=0 ) -> int: lowerCamelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ ) lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 )[0] lowerCamelCase_ = Image.fromarray(np.uinta(a_ ) ).convert('RGB' ) if str(a_ ).startswith('mps' ): lowerCamelCase_ = torch.manual_seed(a_ ) else: lowerCamelCase_ = torch.Generator(device=a_ ).manual_seed(a_ ) lowerCamelCase_ = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'image_guidance_scale': 1, 'output_type': 'numpy', } return inputs def UpperCamelCase ( self : str ) -> Optional[Any]: lowerCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCamelCase_ = self.get_dummy_components() lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**a_ ) lowerCamelCase_ = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) lowerCamelCase_ = self.get_dummy_inputs(a_ ) lowerCamelCase_ = sd_pipe(**a_ ).images lowerCamelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase_ = np.array([0.7_526, 0.3_750, 0.4_547, 0.6_117, 0.5_866, 0.5_016, 0.4_327, 0.5_642, 0.4_815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCamelCase ( self : Optional[Any] ) -> int: lowerCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCamelCase_ = self.get_dummy_components() lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**a_ ) lowerCamelCase_ = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) lowerCamelCase_ = self.get_dummy_inputs(a_ ) lowerCamelCase_ = 'french fries' lowerCamelCase_ = sd_pipe(**a_ , negative_prompt=a_ ) lowerCamelCase_ = output.images lowerCamelCase_ = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) lowerCamelCase_ = np.array([0.7_511, 0.3_642, 0.4_553, 0.6_236, 0.5_797, 0.5_013, 0.4_343, 0.5_611, 0.4_831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCamelCase ( self : List[Any] ) -> List[str]: lowerCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCamelCase_ = self.get_dummy_components() lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**a_ ) lowerCamelCase_ = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) lowerCamelCase_ = self.get_dummy_inputs(a_ ) lowerCamelCase_ = [inputs['prompt']] * 2 lowerCamelCase_ = np.array(inputs['image'] ).astype(np.floataa ) / 255.0 lowerCamelCase_ = torch.from_numpy(a_ ).unsqueeze(0 ).to(a_ ) lowerCamelCase_ = image / 2 + 0.5 lowerCamelCase_ = image.permute(0 , 3 , 1 , 2 ) lowerCamelCase_ = image.repeat(2 , 1 , 1 , 1 ) lowerCamelCase_ = sd_pipe(**a_ ).images lowerCamelCase_ = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) lowerCamelCase_ = np.array([0.5_812, 0.5_748, 0.5_222, 0.5_908, 0.5_695, 0.7_174, 0.6_804, 0.5_523, 0.5_579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCamelCase ( self : List[Any] ) -> List[Any]: lowerCamelCase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator lowerCamelCase_ = self.get_dummy_components() lowerCamelCase_ = EulerAncestralDiscreteScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule='scaled_linear' ) lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**a_ ) lowerCamelCase_ = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) lowerCamelCase_ = self.get_dummy_inputs(a_ ) lowerCamelCase_ = sd_pipe(**a_ ).images lowerCamelCase_ = image[0, -3:, -3:, -1] lowerCamelCase_ = [round(a_ , 4 ) for x in image_slice.flatten().tolist()] print(','.join([str(a_ ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) lowerCamelCase_ = np.array([0.7_417, 0.3_842, 0.4_732, 0.5_776, 0.5_891, 0.5_139, 0.4_052, 0.5_673, 0.4_986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def UpperCamelCase ( self : Union[str, Any] ) -> Any: super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def UpperCamelCase ( self : List[Any] ) -> Dict: lowerCamelCase_ = self.get_dummy_components() lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline(**a_ ) lowerCamelCase_ = VaeImageProcessor(do_resize=a_ , do_normalize=a_ ) lowerCamelCase_ = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) lowerCamelCase_ = pipe(**self.get_dummy_inputs_by_type(a_ , input_image_type='pt' ) )[0] lowerCamelCase_ = components['vae'] lowerCamelCase_ = self.get_dummy_inputs_by_type(a_ , input_image_type='pt' ) for image_param in self.image_latents_params: if image_param in inputs.keys(): lowerCamelCase_ = vae.encode(inputs[image_param] ).latent_dist.mode() lowerCamelCase_ = pipe(**a_ )[0] lowerCamelCase_ = np.abs(out - out_latents_inputs ).max() self.assertLess(a_ , 1e-4 , 'passing latents as image input generate different result from passing image' ) @slow @require_torch_gpu class a ( unittest.TestCase ): def UpperCamelCase ( self : Tuple ) -> Dict: super().tearDown() gc.collect() torch.cuda.empty_cache() def UpperCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Dict=0 ) -> Any: lowerCamelCase_ = torch.manual_seed(a_ ) lowerCamelCase_ = load_image( 'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' ) lowerCamelCase_ = { 'prompt': 'turn him into a cyborg', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'image_guidance_scale': 1.0, 'output_type': 'numpy', } return inputs def UpperCamelCase ( self : int ) -> Optional[int]: lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() lowerCamelCase_ = self.get_inputs() lowerCamelCase_ = pipe(**a_ ).images lowerCamelCase_ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) lowerCamelCase_ = np.array([0.5_902, 0.6_015, 0.6_027, 0.5_983, 0.6_092, 0.6_061, 0.5_765, 0.5_785, 0.5_555] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def UpperCamelCase ( self : Dict ) -> str: lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ ) lowerCamelCase_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() lowerCamelCase_ = self.get_inputs() lowerCamelCase_ = pipe(**a_ ).images lowerCamelCase_ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) lowerCamelCase_ = np.array([0.6_578, 0.6_817, 0.6_972, 0.6_761, 0.6_856, 0.6_916, 0.6_428, 0.6_516, 0.6_301] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]: lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ ) lowerCamelCase_ = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() lowerCamelCase_ = self.get_inputs() lowerCamelCase_ = pipe(**a_ ).images lowerCamelCase_ = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) lowerCamelCase_ = np.array([0.3_828, 0.3_834, 0.3_818, 0.3_792, 0.3_865, 0.3_752, 0.3_792, 0.3_847, 0.3_753] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def UpperCamelCase ( self : int ) -> List[str]: lowerCamelCase_ = 0 def callback_fn(__SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : torch.FloatTensor ) -> None: lowerCamelCase_ = True nonlocal number_of_steps number_of_steps += 1 if step == 1: lowerCamelCase_ = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) lowerCamelCase_ = latents[0, -3:, -3:, -1] lowerCamelCase_ = np.array([-0.2_463, -0.4_644, -0.9_756, 1.5_176, 1.4_414, 0.7_866, 0.9_897, 0.8_521, 0.7_983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: lowerCamelCase_ = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) lowerCamelCase_ = latents[0, -3:, -3:, -1] lowerCamelCase_ = np.array([-0.2_644, -0.4_626, -0.9_653, 1.5_176, 1.4_551, 0.7_686, 0.9_805, 0.8_452, 0.8_115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 lowerCamelCase_ = False lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ , torch_dtype=torch.floataa ) lowerCamelCase_ = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() lowerCamelCase_ = self.get_inputs() pipe(**a_ , callback=a_ , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def UpperCamelCase ( self : int ) -> Any: torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ , torch_dtype=torch.floataa ) lowerCamelCase_ = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() lowerCamelCase_ = self.get_inputs() lowerCamelCase_ = pipe(**a_ ) lowerCamelCase_ = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def UpperCamelCase ( self : Tuple ) -> List[Any]: lowerCamelCase_ = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 lowerCamelCase_ = inputs['image'].resize((504, 504) ) lowerCamelCase_ = 'timbrooks/instruct-pix2pix' lowerCamelCase_ = StableDiffusionInstructPixaPixPipeline.from_pretrained( a_ , safety_checker=a_ , ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() lowerCamelCase_ = pipe(**a_ ) lowerCamelCase_ = output.images[0] lowerCamelCase_ = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) lowerCamelCase_ = np.array([0.2_726, 0.2_529, 0.2_664, 0.2_655, 0.2_641, 0.2_642, 0.2_591, 0.2_649, 0.2_590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
549
import os import pytest from attr import dataclass SCREAMING_SNAKE_CASE__ : int = "us-east-1" # defaults region @dataclass class snake_case : lowercase_ = 42 lowercase_ = 'arn:aws:iam::558105141721:role/sagemaker_execution_role' lowercase_ = { 'task_name': 'mnli', 'per_device_train_batch_size': 16, 'per_device_eval_batch_size': 16, 'do_train': True, 'do_eval': True, 'do_predict': True, 'output_dir': '/opt/ml/model', 'overwrite_output_dir': True, 'max_steps': 500, 'save_steps': 5_500, } lowercase_ = {**hyperparameters, 'max_steps': 1_000} @property def __lowercase( self : List[str] )-> str: """simple docstring""" if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def __lowercase( self : Union[str, Any] )-> str: """simple docstring""" return F'''{self.framework}-transfromers-test''' @property def __lowercase( self : int )-> str: """simple docstring""" return F'''./tests/sagemaker/scripts/{self.framework}''' @property def __lowercase( self : Tuple )-> str: """simple docstring""" if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope='class' ) def _a ( lowercase__ : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
85
0
"""simple docstring""" import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs lowerCamelCase_ = imread(r"digital_image_processing/image_data/lena_small.jpg") lowerCamelCase_ = cvtColor(img, COLOR_BGR2GRAY) def __lowerCamelCase ( ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE :Any = cn.convert_to_negative(lowercase__ ) # assert negative_img array for at least one True assert negative_img.any() def __lowerCamelCase ( ) -> Union[str, Any]: with Image.open('''digital_image_processing/image_data/lena_small.jpg''' ) as img: # Work around assertion for response assert str(cc.change_contrast(lowercase__ , 1_10 ) ).startswith( '''<PIL.Image.Image image mode=RGB size=100x100 at''' ) def __lowerCamelCase ( ) -> Optional[int]: __SCREAMING_SNAKE_CASE :str = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def __lowerCamelCase ( ) -> Dict: __SCREAMING_SNAKE_CASE :Optional[int] = imread('''digital_image_processing/image_data/lena_small.jpg''' , 0 ) # assert ambiguous array for all == True assert canny_img.all() __SCREAMING_SNAKE_CASE :List[str] = canny.canny(lowercase__ ) # assert canny array for at least one True assert canny_array.any() def __lowerCamelCase ( ) -> Optional[Any]: assert gg.gaussian_filter(lowercase__ , 5 , sigma=0.9 ).all() def __lowerCamelCase ( ) -> str: __SCREAMING_SNAKE_CASE :Any = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) __SCREAMING_SNAKE_CASE :Tuple = conv.img_convolve(lowercase__ , lowercase__ ).astype(lowercase__ ) assert res.any() def __lowerCamelCase ( ) -> Dict: assert med.median_filter(lowercase__ , 3 ).any() def __lowerCamelCase ( ) -> Tuple: __SCREAMING_SNAKE_CASE :int = sob.sobel_filter(lowercase__ ) assert grad.any() and theta.any() def __lowerCamelCase ( ) -> Dict: __SCREAMING_SNAKE_CASE :List[str] = sp.make_sepia(lowercase__ , 20 ) assert sepia.all() def __lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg" ) -> str: __SCREAMING_SNAKE_CASE :str = bs.Burkes(imread(lowercase__ , 1 ) , 1_20 ) burkes.process() assert burkes.output_img.any() def __lowerCamelCase ( a_ : str = "digital_image_processing/image_data/lena_small.jpg" , ) -> List[str]: __SCREAMING_SNAKE_CASE :Optional[Any] = rs.NearestNeighbour(imread(lowercase__ , 1 ) , 4_00 , 2_00 ) nn.process() assert nn.output.any() def __lowerCamelCase ( ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE :Dict = 'digital_image_processing/image_data/lena.jpg' # Reading the image and converting it to grayscale. __SCREAMING_SNAKE_CASE :Dict = imread(lowercase__ , 0 ) # Test for get_neighbors_pixel function() return not None __SCREAMING_SNAKE_CASE :str = 0 __SCREAMING_SNAKE_CASE :Dict = 0 __SCREAMING_SNAKE_CASE :Any = image[x_coordinate][y_coordinate] __SCREAMING_SNAKE_CASE :List[Any] = lbp.get_neighbors_pixel( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image __SCREAMING_SNAKE_CASE :Optional[Any] = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): __SCREAMING_SNAKE_CASE :str = lbp.local_binary_value(lowercase__ , lowercase__ , lowercase__ ) assert lbp_image.any()
498
import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = FunnelTokenizer lowercase_ = FunnelTokenizerFast lowercase_ = True lowercase_ = True def __lowercase( self : Union[str, Any] )-> Tuple: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ : str = [ '<unk>', '<cls>', '<sep>', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def __lowercase( self : Any , **a_ : Any )-> List[str]: """simple docstring""" return FunnelTokenizer.from_pretrained(self.tmpdirname , **a_ ) def __lowercase( self : Tuple , **a_ : List[Any] )-> List[Any]: """simple docstring""" return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **a_ ) def __lowercase( self : Optional[Any] , a_ : List[str] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'UNwant\u00E9d,running' SCREAMING_SNAKE_CASE__ : int = 'unwanted, running' return input_text, output_text def __lowercase( self : Optional[Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer_class(self.vocab_file ) SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(a_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [7, 4, 5, 10, 8, 9] ) def __lowercase( self : List[Any] )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_tokenizers(do_lower_case=a_ ) for tokenizer in tokenizers: SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer('UNwant\u00E9d,running' ) SCREAMING_SNAKE_CASE__ : List[Any] = len(inputs['input_ids'] ) - 1 self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer('UNwant\u00E9d,running' , 'UNwant\u00E9d,running' ) self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len + [1] * sentence_len )
85
0
'''simple docstring''' import faiss # noqa: F401 # Here to have a nice missing dependency error message early on import numpy # noqa: F401 # Here to have a nice missing dependency error message early on import requests # noqa: F401 # Here to have a nice missing dependency error message early on import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on from mauve import compute_mauve # From: mauve-text import datasets A__ : Optional[int] = "\\n@inproceedings{pillutla-etal:mauve:neurips2021,\n title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},\n author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},\n booktitle = {NeurIPS},\n year = {2021}\n}\n\n" A__ : Tuple = "\\nMAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.\n\nMAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.\n\nFor details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).\n\nThis metrics is a wrapper around the official implementation of MAUVE:\nhttps://github.com/krishnap25/mauve\n" A__ : Optional[Any] = "\nCalculates MAUVE scores between two lists of generated text and reference text.\nArgs:\n predictions: list of generated text to score. Each predictions\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\nOptional Args:\n num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer\n pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1\n kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9\n kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5\n kmeans_max_iter: maximum number of k-means iterations. Default 500\n featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].\n device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU\n max_text_length: maximum number of tokens to consider. Default 1024\n divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25\n mauve_scaling_factor: \"c\" from the paper. Default 5.\n verbose: If True (default), print running time updates\n seed: random seed to initialize k-means cluster assignments.\nReturns:\n mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,\n frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,\n divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,\n p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,\n q_hist: same as above, but with q_text.\nExamples:\n\n >>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest\n >>> import datasets\n >>> mauve = datasets.load_metric('mauve')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP\n >>> print(out.mauve) # doctest: +SKIP\n 1.0\n" @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class snake_case__ ( datasets.Metric ): def A_ ( self : int ) -> Tuple: '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { 'predictions': datasets.Value('string' , id='sequence' ), 'references': datasets.Value('string' , id='sequence' ), } ) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[ 'https://arxiv.org/abs/2102.01454', 'https://github.com/krishnap25/mauve', ] , ) def A_ ( self : Optional[int] , __a : int , __a : Union[str, Any] , __a : Optional[int]=None , __a : Union[str, Any]=None , __a : Tuple=None , __a : int=None , __a : Any="auto" , __a : List[Any]=-1 , __a : str=0.9 , __a : str=5 , __a : List[Any]=500 , __a : Dict="gpt2-large" , __a : Tuple=-1 , __a : List[Any]=1024 , __a : Dict=25 , __a : Optional[int]=5 , __a : Any=True , __a : List[str]=25 , ) -> Union[str, Any]: '''simple docstring''' __snake_case : List[Any] = compute_mauve( p_text=a_ , q_text=a_ , p_features=a_ , q_features=a_ , p_tokens=a_ , q_tokens=a_ , num_buckets=a_ , pca_max_data=a_ , kmeans_explained_var=a_ , kmeans_num_redo=a_ , kmeans_max_iter=a_ , featurize_model_name=a_ , device_id=a_ , max_text_length=a_ , divergence_curve_discretization_size=a_ , mauve_scaling_factor=a_ , verbose=a_ , seed=a_ , ) return out
286
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Any = { "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json", # See all LeViT models at https://huggingface.co/models?filter=levit } class snake_case ( UpperCamelCase_ ): lowercase_ = 'levit' def __init__( self : str , a_ : Optional[Any]=224 , a_ : List[str]=3 , a_ : Any=3 , a_ : Any=2 , a_ : Tuple=1 , a_ : int=16 , a_ : Optional[int]=[128, 256, 384] , a_ : Dict=[4, 8, 12] , a_ : List[str]=[4, 4, 4] , a_ : Any=[16, 16, 16] , a_ : Dict=0 , a_ : Tuple=[2, 2, 2] , a_ : Union[str, Any]=[2, 2, 2] , a_ : Optional[Any]=0.02 , **a_ : str , )-> Any: """simple docstring""" super().__init__(**a_ ) SCREAMING_SNAKE_CASE__ : Any = image_size SCREAMING_SNAKE_CASE__ : List[Any] = num_channels SCREAMING_SNAKE_CASE__ : Any = kernel_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = stride SCREAMING_SNAKE_CASE__ : Any = padding SCREAMING_SNAKE_CASE__ : Any = hidden_sizes SCREAMING_SNAKE_CASE__ : List[Any] = num_attention_heads SCREAMING_SNAKE_CASE__ : Optional[Any] = depths SCREAMING_SNAKE_CASE__ : List[str] = key_dim SCREAMING_SNAKE_CASE__ : int = drop_path_rate SCREAMING_SNAKE_CASE__ : List[str] = patch_size SCREAMING_SNAKE_CASE__ : List[str] = attention_ratio SCREAMING_SNAKE_CASE__ : Tuple = mlp_ratio SCREAMING_SNAKE_CASE__ : str = initializer_range SCREAMING_SNAKE_CASE__ : List[Any] = [ ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class snake_case ( UpperCamelCase_ ): lowercase_ = version.parse('1.11' ) @property def __lowercase( self : str )-> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def __lowercase( self : Any )-> float: """simple docstring""" return 1e-4
85
0
'''simple docstring''' from __future__ import annotations def UpperCamelCase_ ( A__ : list ): '''simple docstring''' if not nums: raise ValueError("""List is empty""" ) return sum(lowercase__ ) / len(lowercase__ ) if __name__ == "__main__": import doctest doctest.testmod()
275
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class snake_case ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase_ = StableDiffusionInstructPixaPixPipeline lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'} lowercase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS def __lowercase( self : str )-> int: """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) SCREAMING_SNAKE_CASE__ : List[str] = PNDMScheduler(skip_prk_steps=a_ ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) SCREAMING_SNAKE_CASE__ : int = CLIPTextModel(a_ ) SCREAMING_SNAKE_CASE__ : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) SCREAMING_SNAKE_CASE__ : List[str] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def __lowercase( self : List[Any] , a_ : Tuple , a_ : Optional[Any]=0 )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ ) SCREAMING_SNAKE_CASE__ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE__ : List[Any] = Image.fromarray(np.uinta(a_ ) ).convert('RGB' ) if str(a_ ).startswith('mps' ): SCREAMING_SNAKE_CASE__ : str = torch.manual_seed(a_ ) else: SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Generator(device=a_ ).manual_seed(a_ ) SCREAMING_SNAKE_CASE__ : Dict = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'image_guidance_scale': 1, 'output_type': 'numpy', } return inputs def __lowercase( self : str )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Tuple = self.get_dummy_inputs(a_ ) SCREAMING_SNAKE_CASE__ : int = sd_pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE__ : Dict = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase( self : Optional[Any] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE__ : Dict = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_inputs(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = 'french fries' SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe(**a_ , negative_prompt=a_ ) SCREAMING_SNAKE_CASE__ : Dict = output.images SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase( self : List[Any] )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : int = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [inputs['prompt']] * 2 SCREAMING_SNAKE_CASE__ : List[str] = np.array(inputs['image'] ).astype(np.floataa ) / 255.0 SCREAMING_SNAKE_CASE__ : Tuple = torch.from_numpy(a_ ).unsqueeze(0 ).to(a_ ) SCREAMING_SNAKE_CASE__ : Dict = image / 2 + 0.5 SCREAMING_SNAKE_CASE__ : Tuple = image.permute(0 , 3 , 1 , 2 ) SCREAMING_SNAKE_CASE__ : int = image.repeat(2 , 1 , 1 , 1 ) SCREAMING_SNAKE_CASE__ : Optional[int] = sd_pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : Any = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) SCREAMING_SNAKE_CASE__ : int = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase( self : List[Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE__ : str = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Optional[Any] = EulerAncestralDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' ) SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : Dict = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_inputs(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = sd_pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : Any = [round(a_ , 4 ) for x in image_slice.flatten().tolist()] print(','.join([str(a_ ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE__ : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase( self : Union[str, Any] )-> Any: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def __lowercase( self : List[Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : int = VaeImageProcessor(do_resize=a_ , do_normalize=a_ ) SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Any = pipe(**self.get_dummy_inputs_by_type(a_ , input_image_type='pt' ) )[0] SCREAMING_SNAKE_CASE__ : Optional[int] = components['vae'] SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs_by_type(a_ , input_image_type='pt' ) for image_param in self.image_latents_params: if image_param in inputs.keys(): SCREAMING_SNAKE_CASE__ : Union[str, Any] = vae.encode(inputs[image_param] ).latent_dist.mode() SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(**a_ )[0] SCREAMING_SNAKE_CASE__ : List[Any] = np.abs(out - out_latents_inputs ).max() self.assertLess(a_ , 1e-4 , 'passing latents as image input generate different result from passing image' ) @slow @require_torch_gpu class snake_case ( unittest.TestCase ): def __lowercase( self : Tuple )-> Dict: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase( self : List[Any] , a_ : Dict=0 )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = torch.manual_seed(a_ ) SCREAMING_SNAKE_CASE__ : List[str] = load_image( 'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' ) SCREAMING_SNAKE_CASE__ : Tuple = { 'prompt': 'turn him into a cyborg', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'image_guidance_scale': 1.0, 'output_type': 'numpy', } return inputs def __lowercase( self : int )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : str = self.get_inputs() SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __lowercase( self : Dict )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ ) SCREAMING_SNAKE_CASE__ : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs() SCREAMING_SNAKE_CASE__ : Dict = pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : Optional[int] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ : List[Any] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __lowercase( self : Optional[int] )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ ) SCREAMING_SNAKE_CASE__ : Dict = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : str = self.get_inputs() SCREAMING_SNAKE_CASE__ : Tuple = pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __lowercase( self : int )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = 0 def callback_fn(a_ : int , a_ : int , a_ : torch.FloatTensor ) -> None: SCREAMING_SNAKE_CASE__ : Tuple = True nonlocal number_of_steps number_of_steps += 1 if step == 1: SCREAMING_SNAKE_CASE__ : Union[str, Any] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) SCREAMING_SNAKE_CASE__ : List[Any] = latents[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: SCREAMING_SNAKE_CASE__ : Optional[int] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) SCREAMING_SNAKE_CASE__ : Tuple = latents[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : Dict = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 SCREAMING_SNAKE_CASE__ : List[str] = False SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs() pipe(**a_ , callback=a_ , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def __lowercase( self : int )-> Any: """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs() SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(**a_ ) SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def __lowercase( self : Tuple )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 SCREAMING_SNAKE_CASE__ : Dict = inputs['image'].resize((504, 504) ) SCREAMING_SNAKE_CASE__ : List[Any] = 'timbrooks/instruct-pix2pix' SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained( a_ , safety_checker=a_ , ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : Any = pipe(**a_ ) SCREAMING_SNAKE_CASE__ : List[str] = output.images[0] SCREAMING_SNAKE_CASE__ : Any = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) SCREAMING_SNAKE_CASE__ : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
85
0
from heapq import heappop, heappush import numpy as np def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) -> Tuple: lowercase : Union[str, Any] = grid.shape lowercase : Optional[Any] = [-1, 1, 0, 0] lowercase : Tuple = [0, 0, -1, 1] if allow_diagonal: dx += [-1, -1, 1, 1] dy += [-1, 1, -1, 1] lowercase : List[Any] = [(0, source)], set() lowercase : str = np.full((rows, cols) , np.inf ) lowercase : List[Any] = 0 lowercase : Dict = np.empty((rows, cols) , dtype=lowercase__ ) lowercase : List[Any] = None while queue: (lowercase) : Optional[Any] = heappop(lowercase__ ) if (x, y) in visited: continue visited.add((x, y) ) if (x, y) == destination: lowercase : Any = [] while (x, y) != source: path.append((x, y) ) lowercase : Tuple = predecessors[x, y] path.append(lowercase__ ) # add the source manually path.reverse() return matrix[destination], path for i in range(len(lowercase__ ) ): lowercase : Union[str, Any] = x + dx[i], y + dy[i] if 0 <= nx < rows and 0 <= ny < cols: lowercase : Optional[Any] = grid[nx][ny] if next_node == 1 and matrix[nx, ny] > dist + 1: heappush(lowercase__ , (dist + 1, (nx, ny)) ) lowercase : List[str] = dist + 1 lowercase : Any = (x, y) return np.inf, [] if __name__ == "__main__": import doctest doctest.testmod()
336
import math from collections.abc import Callable def _a ( lowercase__ : Callable[[float], float] , lowercase__ : float , lowercase__ : float ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : float = xa SCREAMING_SNAKE_CASE__ : float = xa while True: if x_n == x_na or function(lowercase__ ) == function(lowercase__ ): raise ZeroDivisionError('float division by zero, could not find root' ) SCREAMING_SNAKE_CASE__ : float = x_na - ( function(lowercase__ ) / ((function(lowercase__ ) - function(lowercase__ )) / (x_na - x_n)) ) if abs(x_na - x_na ) < 10**-5: return x_na SCREAMING_SNAKE_CASE__ : Dict = x_na SCREAMING_SNAKE_CASE__ : List[str] = x_na def _a ( lowercase__ : float ): '''simple docstring''' return math.pow(lowercase__ , 3 ) - (2 * x) - 5 if __name__ == "__main__": print(intersection(f, 3, 3.5))
85
0
"""simple docstring""" from pathlib import Path import numpy as np from PIL import Image def lowercase (_snake_case ) -> List[Any]: '''simple docstring''' __UpperCamelCase = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2_9_8_9 * r + 0.5_8_7_0 * g + 0.1_1_4_0 * b def lowercase (_snake_case ) -> int: '''simple docstring''' return (gray > 127) & (gray <= 255) def lowercase (_snake_case ,_snake_case ) -> Union[str, Any]: '''simple docstring''' __UpperCamelCase = np.zeros_like(lowercase__ ) __UpperCamelCase = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image __UpperCamelCase = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): __UpperCamelCase = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() __UpperCamelCase = int(summation > 0 ) return output if __name__ == "__main__": # read original image _A = Path(__file__).resolve().parent / "image_data" / "lena.jpg" _A = np.array(Image.open(lena_path)) # kernel to be applied _A = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) _A = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image _A = Image.fromarray(output).convert("RGB") pil_img.save("result_dilation.png")
505
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class snake_case ( UpperCamelCase_ ): lowercase_ = ['image_processor', 'tokenizer'] lowercase_ = 'AutoImageProcessor' lowercase_ = 'AutoTokenizer' def __init__( self : List[Any] , a_ : int , a_ : Union[str, Any] )-> List[Any]: """simple docstring""" super().__init__(a_ , a_ ) SCREAMING_SNAKE_CASE__ : str = self.image_processor def __call__( self : Tuple , a_ : str=None , a_ : List[Any]=None , a_ : Optional[Any]=None , **a_ : Dict )-> Tuple: """simple docstring""" if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: SCREAMING_SNAKE_CASE__ : Any = self.tokenizer(a_ , return_tensors=a_ , **a_ ) if images is not None: SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processor(a_ , return_tensors=a_ , **a_ ) if text is not None and images is not None: SCREAMING_SNAKE_CASE__ : List[str] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ ) def __lowercase( self : Dict , *a_ : Any , **a_ : Any )-> List[Any]: """simple docstring""" return self.tokenizer.batch_decode(*a_ , **a_ ) def __lowercase( self : Dict , *a_ : Union[str, Any] , **a_ : Optional[int] )-> Dict: """simple docstring""" return self.tokenizer.decode(*a_ , **a_ ) @property def __lowercase( self : Any )-> Any: """simple docstring""" return ["input_ids", "attention_mask", "pixel_values"]
85
0
import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin _lowercase : Union[str, Any] =get_tests_dir("""fixtures/test_sentencepiece.model""") _lowercase : Optional[int] =get_tests_dir("""fixtures/test_sentencepiece_bpe.model""") _lowercase : Any ="pt" if is_torch_available() else "tf" @require_sentencepiece @require_tokenizers class UpperCamelCase_ ( UpperCamelCase_ , unittest.TestCase ): _a : Tuple = CamembertTokenizer _a : List[Any] = CamembertTokenizerFast _a : Any = True _a : List[str] = True def __a ( self : Tuple ): super().setUp() # We have a SentencePiece fixture for testing lowerCamelCase_ : Dict = CamembertTokenizer(a_ ) tokenizer.save_pretrained(self.tmpdirname ) def __a ( self : Any ): lowerCamelCase_ : Union[str, Any] = '<pad>' lowerCamelCase_ : int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ ) def __a ( self : Optional[Any] ): lowerCamelCase_ : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>NOTUSED' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(a_ ) , 10_04 ) def __a ( self : Union[str, Any] ): self.assertEqual(self.get_tokenizer().vocab_size , 10_05 ) def __a ( self : List[Any] ): lowerCamelCase_ : Optional[int] = CamembertTokenizer(a_ ) tokenizer.save_pretrained(self.tmpdirname ) lowerCamelCase_ : int = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) lowerCamelCase_ : str = 'I was born in 92000, and this is falsé.' lowerCamelCase_ : Tuple = tokenizer.encode(a_ ) lowerCamelCase_ : Optional[Any] = rust_tokenizer.encode(a_ ) self.assertListEqual(a_ , a_ ) lowerCamelCase_ : str = tokenizer.encode(a_ , add_special_tokens=a_ ) lowerCamelCase_ : List[str] = rust_tokenizer.encode(a_ , add_special_tokens=a_ ) self.assertListEqual(a_ , a_ ) # <unk> tokens are not the same for `rust` than for `slow`. # Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) lowerCamelCase_ : List[str] = tokenizer.convert_ids_to_tokens(a_ ) lowerCamelCase_ : List[Any] = rust_tokenizer.tokenize(a_ ) self.assertListEqual(a_ , a_ ) def __a ( self : Union[str, Any] ): if not self.test_rust_tokenizer: return lowerCamelCase_ : Optional[int] = self.get_tokenizer() lowerCamelCase_ : Optional[Any] = self.get_rust_tokenizer() lowerCamelCase_ : Tuple = 'I was born in 92000, and this is falsé.' lowerCamelCase_ : str = tokenizer.tokenize(a_ ) lowerCamelCase_ : List[Any] = rust_tokenizer.tokenize(a_ ) self.assertListEqual(a_ , a_ ) lowerCamelCase_ : Optional[int] = tokenizer.encode(a_ , add_special_tokens=a_ ) lowerCamelCase_ : Optional[int] = rust_tokenizer.encode(a_ , add_special_tokens=a_ ) self.assertListEqual(a_ , a_ ) lowerCamelCase_ : int = self.get_rust_tokenizer() lowerCamelCase_ : Union[str, Any] = tokenizer.encode(a_ ) lowerCamelCase_ : Tuple = rust_tokenizer.encode(a_ ) self.assertListEqual(a_ , a_ ) @slow def __a ( self : List[str] ): lowerCamelCase_ : Union[str, Any] = {'input_ids': [[5, 54, 71_96, 2_97, 30, 23, 7_76, 18, 11, 32_15, 37_05, 82_52, 22, 31_64, 11_81, 21_16, 29, 16, 8_13, 25, 7_91, 33_14, 20, 34_46, 38, 2_75_75, 1_20, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_68, 17, 11, 90_88, 20, 15_17, 8, 2_28_04, 1_88_18, 10, 38, 6_29, 6_07, 6_07, 1_42, 19, 71_96, 8_67, 56, 1_03_26, 24, 22_67, 20, 4_16, 50_72, 1_56_12, 2_33, 7_34, 7, 23_99, 27, 16, 30_15, 16_49, 7, 24, 20, 43_38, 23_99, 27, 13, 34_00, 14, 13, 61_89, 8, 9_30, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. lowerCamelCase_ : str = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=a_ , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=a_ , )
364
import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def _a ( lowercase__ : int = 3 ): '''simple docstring''' if isinstance(lowercase__ , lowercase__ ): raise TypeError('number of qubits must be a integer.' ) if number_of_qubits <= 0: raise ValueError('number of qubits must be > 0.' ) if math.floor(lowercase__ ) != number_of_qubits: raise ValueError('number of qubits must be exact integer.' ) if number_of_qubits > 10: raise ValueError('number of qubits too large to simulate(>10).' ) SCREAMING_SNAKE_CASE__ : Tuple = QuantumRegister(lowercase__ , 'qr' ) SCREAMING_SNAKE_CASE__ : int = ClassicalRegister(lowercase__ , 'cr' ) SCREAMING_SNAKE_CASE__ : Tuple = QuantumCircuit(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE__ : Tuple = number_of_qubits for i in range(lowercase__ ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(lowercase__ ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , lowercase__ , lowercase__ ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(lowercase__ , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(lowercase__ , lowercase__ ) # simulate with 10000 shots SCREAMING_SNAKE_CASE__ : Optional[int] = Aer.get_backend('qasm_simulator' ) SCREAMING_SNAKE_CASE__ : Tuple = execute(lowercase__ , lowercase__ , shots=1_00_00 ) return job.result().get_counts(lowercase__ ) if __name__ == "__main__": print( F"""Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}""" )
85
0
'''simple docstring''' import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def lowerCamelCase__ ( __lowercase = 3 ): if isinstance(lowercase__ , lowercase__ ): raise TypeError("""number of qubits must be a integer.""" ) if number_of_qubits <= 0: raise ValueError("""number of qubits must be > 0.""" ) if math.floor(lowercase__ ) != number_of_qubits: raise ValueError("""number of qubits must be exact integer.""" ) if number_of_qubits > 10: raise ValueError("""number of qubits too large to simulate(>10).""" ) snake_case : Tuple = QuantumRegister(lowercase__ , """qr""" ) snake_case : int = ClassicalRegister(lowercase__ , """cr""" ) snake_case : Tuple = QuantumCircuit(lowercase__ , lowercase__ ) snake_case : Tuple = number_of_qubits for i in range(lowercase__ ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(lowercase__ ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , lowercase__ , lowercase__ ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(lowercase__ , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(lowercase__ , lowercase__ ) # simulate with 10000 shots snake_case : Optional[int] = Aer.get_backend("""qasm_simulator""" ) snake_case : Tuple = execute(lowercase__ , lowercase__ , shots=10_000 ) return job.result().get_counts(lowercase__ ) if __name__ == "__main__": print( F"""Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}""" )
116
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="%(message)s") def _a ( lowercase__ : np.ndarray ): '''simple docstring''' return input_array.reshape((input_array.size, 1) ) def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray , lowercase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = np.nan for i in range(lowercase__ ): SCREAMING_SNAKE_CASE__ : int = features[:, labels == i] SCREAMING_SNAKE_CASE__ : int = data.mean(1 ) # Centralize the data of class i SCREAMING_SNAKE_CASE__ : Optional[Any] = data - column_reshape(lowercase__ ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(lowercase__ , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) SCREAMING_SNAKE_CASE__ : Any = np.dot(lowercase__ , centered_data.T ) return covariance_sum / features.shape[1] def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray , lowercase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = features.mean(1 ) SCREAMING_SNAKE_CASE__ : List[str] = np.nan for i in range(lowercase__ ): SCREAMING_SNAKE_CASE__ : Tuple = features[:, labels == i] SCREAMING_SNAKE_CASE__ : int = data.shape[1] SCREAMING_SNAKE_CASE__ : List[Any] = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(lowercase__ ) - column_reshape(lowercase__ ) , (column_reshape(lowercase__ ) - column_reshape(lowercase__ )).T , ) else: # If covariance_sum is np.nan (i.e. first loop) SCREAMING_SNAKE_CASE__ : str = device_data * np.dot( column_reshape(lowercase__ ) - column_reshape(lowercase__ ) , (column_reshape(lowercase__ ) - column_reshape(lowercase__ )).T , ) return covariance_sum / features.shape[1] def _a ( lowercase__ : np.ndarray , lowercase__ : int ): '''simple docstring''' if features.any(): SCREAMING_SNAKE_CASE__ : Any = features.mean(1 ) # Center the dataset SCREAMING_SNAKE_CASE__ : Optional[Any] = features - np.reshape(lowercase__ , (data_mean.size, 1) ) SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(lowercase__ , centered_data.T ) / features.shape[1] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = np.linalg.eigh(lowercase__ ) # Take all the columns in the reverse order (-1), and then takes only the first SCREAMING_SNAKE_CASE__ : List[Any] = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.dot(filtered_eigenvectors.T , lowercase__ ) logging.info('Principal Component Analysis computed' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=lowercase__ ) logging.error('Dataset empty' ) raise AssertionError def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray , lowercase__ : int , lowercase__ : int ): '''simple docstring''' assert classes > dimensions # Check if features have been already loaded if features.any: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = eigh( covariance_between_classes(lowercase__ , lowercase__ , lowercase__ ) , covariance_within_classes(lowercase__ , lowercase__ , lowercase__ ) , ) SCREAMING_SNAKE_CASE__ : Tuple = eigenvectors[:, ::-1][:, :dimensions] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = np.linalg.svd(lowercase__ ) SCREAMING_SNAKE_CASE__ : List[Any] = svd_matrix[:, 0:dimensions] SCREAMING_SNAKE_CASE__ : int = np.dot(filtered_svd_matrix.T , lowercase__ ) logging.info('Linear Discriminant Analysis computed' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=lowercase__ ) logging.error('Dataset empty' ) raise AssertionError def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) SCREAMING_SNAKE_CASE__ : Tuple = np.array([0, 0, 0, 1, 1] ) SCREAMING_SNAKE_CASE__ : str = 2 SCREAMING_SNAKE_CASE__ : Dict = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(lowercase__ ) as error_info: SCREAMING_SNAKE_CASE__ : Optional[int] = linear_discriminant_analysis( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) if isinstance(lowercase__ , np.ndarray ): raise AssertionError( 'Did not raise AssertionError for dimensions > classes' ) assert error_info.type is AssertionError def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) SCREAMING_SNAKE_CASE__ : List[str] = 2 SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] ) with pytest.raises(lowercase__ ) as error_info: SCREAMING_SNAKE_CASE__ : int = principal_component_analysis(lowercase__ , lowercase__ ) if not np.allclose(lowercase__ , lowercase__ ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
85
0
def a_ ( __lowerCAmelCase ): lowerCAmelCase__ = n ** (1 / 3) return (val * val * val) == n if __name__ == "__main__": print(perfect_cube(27)) print(perfect_cube(4))
615
import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) SCREAMING_SNAKE_CASE__ : Optional[int] = logging.getLogger(__name__) SCREAMING_SNAKE_CASE__ : List[Any] = "Hello world! cécé herlolip" SCREAMING_SNAKE_CASE__ : Dict = namedtuple( "BertAbsConfig", [ "temp_dir", "large", "use_bert_emb", "finetune_bert", "encoder", "share_emb", "max_pos", "enc_layers", "enc_hidden_size", "enc_heads", "enc_ff_size", "enc_dropout", "dec_layers", "dec_hidden_size", "dec_heads", "dec_ff_size", "dec_dropout", ], ) def _a ( lowercase__ : List[str] , lowercase__ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = BertAbsConfig( temp_dir='.' , finetune_bert=lowercase__ , large=lowercase__ , share_emb=lowercase__ , use_bert_emb=lowercase__ , encoder='bert' , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.load(lowercase__ , lambda lowercase__ , lowercase__ : storage ) SCREAMING_SNAKE_CASE__ : Any = AbsSummarizer(lowercase__ , torch.device('cpu' ) , lowercase__ ) original.eval() SCREAMING_SNAKE_CASE__ : List[Any] = BertAbsSummarizer(lowercase__ , torch.device('cpu' ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info('convert the model' ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info('Make sure that the models\' outputs are identical' ) SCREAMING_SNAKE_CASE__ : Any = BertTokenizer.from_pretrained('bert-base-uncased' ) # prepare the model inputs SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.encode('This is sample éàalj\'-.' ) encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowercase__ )) ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor(lowercase__ ).unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode('This is sample 3 éàalj\'-.' ) decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowercase__ )) ) SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor(lowercase__ ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass SCREAMING_SNAKE_CASE__ : int = encoder_input_ids SCREAMING_SNAKE_CASE__ : Any = decoder_input_ids SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : Dict = None SCREAMING_SNAKE_CASE__ : str = None SCREAMING_SNAKE_CASE__ : List[str] = None SCREAMING_SNAKE_CASE__ : Optional[Any] = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. # We make sure that the outputs of the full stack are identical SCREAMING_SNAKE_CASE__ : Optional[Any] = original(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )[0] SCREAMING_SNAKE_CASE__ : Optional[int] = original.generator(lowercase__ ) SCREAMING_SNAKE_CASE__ : Tuple = new_model( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )[0] SCREAMING_SNAKE_CASE__ : List[Any] = new_model.generator(lowercase__ ) SCREAMING_SNAKE_CASE__ : Tuple = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print('Maximum absolute difference beween weights: {:.2f}'.format(lowercase__ ) ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print('Maximum absolute difference beween weights: {:.2f}'.format(lowercase__ ) ) SCREAMING_SNAKE_CASE__ : List[Any] = torch.allclose(lowercase__ , lowercase__ , atol=1E-3 ) if are_identical: logging.info('all weights are equal up to 1e-3' ) else: raise ValueError('the weights are different. The new model is likely different from the original one.' ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info('saving the model\'s state dictionary' ) torch.save( new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser() parser.add_argument( "--bertabs_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model.", ) SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
85
0
"""simple docstring""" from __future__ import annotations import math def _snake_case ( _snake_case : int ) -> str: '''simple docstring''' if 1 < number < 4: # 2 and 3 are primes return True elif number < 2 or number % 2 == 0 or number % 3 == 0: # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes return False # All primes number are in format of 6k +/- 1 for i in range(5 , int(math.sqrt(lowercase__ ) + 1 ) , 6 ): if number % i == 0 or number % (i + 2) == 0: return False return True def _snake_case ( _snake_case : int ) -> List[Any]: '''simple docstring''' _A = str(lowercase__ ) _A = [n] for i in range(1 , len(lowercase__ ) ): list_nums.append(int(str_num[i:] ) ) list_nums.append(int(str_num[:-i] ) ) return list_nums def _snake_case ( _snake_case : int ) -> Tuple: '''simple docstring''' if len(str(lowercase__ ) ) > 3: if not is_prime(int(str(lowercase__ )[-3:] ) ) or not is_prime(int(str(lowercase__ )[:3] ) ): return False return True def _snake_case ( _snake_case : int = 11 ) -> int: '''simple docstring''' _A = [] _A = 13 while len(lowercase__ ) != count: if validate(lowercase__ ): _A = list_truncated_nums(lowercase__ ) if all(is_prime(lowercase__ ) for i in list_nums ): list_truncated_primes.append(lowercase__ ) num += 2 return list_truncated_primes def _snake_case ( ) -> Any: '''simple docstring''' return sum(compute_truncated_primes(11 ) ) if __name__ == "__main__": print(F'''{sum(compute_truncated_primes(11)) = }''')
7
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case : def __init__( self : Tuple , a_ : int , a_ : Optional[int]=3 , a_ : Tuple=32 , a_ : Any=3 , a_ : Tuple=10 , a_ : Optional[int]=[10, 20, 30, 40] , a_ : List[Any]=[1, 1, 2, 1] , a_ : int=True , a_ : Optional[Any]=True , a_ : Any="relu" , a_ : int=3 , a_ : List[Any]=None , )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = parent SCREAMING_SNAKE_CASE__ : Optional[int] = batch_size SCREAMING_SNAKE_CASE__ : int = image_size SCREAMING_SNAKE_CASE__ : Tuple = num_channels SCREAMING_SNAKE_CASE__ : Tuple = embeddings_size SCREAMING_SNAKE_CASE__ : str = hidden_sizes SCREAMING_SNAKE_CASE__ : Optional[int] = depths SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_labels SCREAMING_SNAKE_CASE__ : Dict = hidden_act SCREAMING_SNAKE_CASE__ : Tuple = num_labels SCREAMING_SNAKE_CASE__ : List[Any] = scope SCREAMING_SNAKE_CASE__ : str = len(a_ ) def __lowercase( self : Union[str, Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : Any = None if self.use_labels: SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE__ : Tuple = self.get_config() return config, pixel_values, labels def __lowercase( self : str )-> str: """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def __lowercase( self : List[str] , a_ : int , a_ : Any , a_ : Optional[Any] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = TFRegNetModel(config=a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , training=a_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __lowercase( self : Union[str, Any] , a_ : Dict , a_ : int , a_ : Optional[Any] )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.num_labels SCREAMING_SNAKE_CASE__ : Tuple = TFRegNetForImageClassification(a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ , training=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowercase( self : List[str] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = config_and_inputs SCREAMING_SNAKE_CASE__ : Optional[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () lowercase_ = ( {'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification} if is_tf_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False def __lowercase( self : int )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = TFRegNetModelTester(self ) SCREAMING_SNAKE_CASE__ : int = ConfigTester(self , config_class=a_ , has_text_modality=a_ ) def __lowercase( self : List[Any] )-> Tuple: """simple docstring""" return @unittest.skip(reason='RegNet does not use inputs_embeds' ) def __lowercase( self : str )-> Optional[int]: """simple docstring""" pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) @slow def __lowercase( self : Any )-> List[Any]: """simple docstring""" super().test_keras_fit() @unittest.skip(reason='RegNet does not support input and output embeddings' ) def __lowercase( self : Any )-> List[Any]: """simple docstring""" pass def __lowercase( self : Tuple )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : List[Any] = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , a_ ) def __lowercase( self : str )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def __lowercase( self : List[Any] )-> Optional[Any]: """simple docstring""" def check_hidden_states_output(a_ : int , a_ : Union[str, Any] , a_ : Tuple ): SCREAMING_SNAKE_CASE__ : Any = model_class(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**self._prepare_for_class(a_ , a_ ) , training=a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(a_ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : Dict = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: SCREAMING_SNAKE_CASE__ : List[Any] = layer_type SCREAMING_SNAKE_CASE__ : Union[str, Any] = True check_hidden_states_output(a_ , a_ , a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE__ : int = True check_hidden_states_output(a_ , a_ , a_ ) def __lowercase( self : Optional[int] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(a_ : str , a_ : Tuple , a_ : Optional[int] , a_ : Union[str, Any]={} ): SCREAMING_SNAKE_CASE__ : int = model(a_ , return_dict=a_ , **a_ ) SCREAMING_SNAKE_CASE__ : str = model(a_ , return_dict=a_ , **a_ ).to_tuple() def recursive_check(a_ : List[Any] , a_ : int ): if isinstance(a_ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(a_ , a_ ): recursive_check(a_ , a_ ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(a_ , a_ ) ) , msg=( 'Tuple and dict output are not equal. Difference:' F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}''' ) , ) recursive_check(a_ , a_ ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ ) SCREAMING_SNAKE_CASE__ : int = self._prepare_for_class(a_ , a_ ) SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(a_ , a_ ) check_equivalence(a_ , a_ , a_ ) SCREAMING_SNAKE_CASE__ : List[str] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) check_equivalence(a_ , a_ , a_ ) SCREAMING_SNAKE_CASE__ : str = self._prepare_for_class(a_ , a_ ) SCREAMING_SNAKE_CASE__ : List[str] = self._prepare_for_class(a_ , a_ ) check_equivalence(a_ , a_ , a_ , {'output_hidden_states': True} ) SCREAMING_SNAKE_CASE__ : int = self._prepare_for_class(a_ , a_ , return_labels=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) check_equivalence(a_ , a_ , a_ , {'output_hidden_states': True} ) def __lowercase( self : str )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) @slow def __lowercase( self : Any )-> List[str]: """simple docstring""" for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Optional[int] = TFRegNetModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class snake_case ( unittest.TestCase ): @cached_property def __lowercase( self : List[Any] )-> int: """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __lowercase( self : Any )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) SCREAMING_SNAKE_CASE__ : List[Any] = self.default_image_processor SCREAMING_SNAKE_CASE__ : Any = prepare_img() SCREAMING_SNAKE_CASE__ : str = image_processor(images=a_ , return_tensors='tf' ) # forward pass SCREAMING_SNAKE_CASE__ : Tuple = model(**a_ , training=a_ ) # verify the logits SCREAMING_SNAKE_CASE__ : Optional[int] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , a_ ) SCREAMING_SNAKE_CASE__ : Any = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , a_ , atol=1e-4 )
85
0
# tests directory-specific settings - this file is run automatically # by pytest before any tests are run import sys import warnings from os.path import abspath, dirname, join # allow having multiple repository checkouts and not needing to remember to rerun # 'pip install -e .[dev]' when switching between checkouts and running tests. _lowerCamelCase : List[str] = abspath(join(dirname(dirname(__file__)), '''src''')) sys.path.insert(1, git_repo_path) # silence FutureWarning warnings in tests since often we can't act on them until # they become normal warnings - i.e. the tests still need to test the current functionality warnings.simplefilter(action='''ignore''', category=FutureWarning) def _a ( SCREAMING_SNAKE_CASE__ : int ) -> int: '''simple docstring''' from diffusers.utils.testing_utils import pytest_addoption_shared pytest_addoption_shared(lowercase__ ) def _a ( SCREAMING_SNAKE_CASE__ : Tuple ) -> int: '''simple docstring''' from diffusers.utils.testing_utils import pytest_terminal_summary_main SCREAMING_SNAKE_CASE__ : Optional[Any] = terminalreporter.config.getoption("--make-reports" ) if make_reports: pytest_terminal_summary_main(lowercase__ , id=lowercase__ )
663
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE__ : Optional[Any] = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[Any] = ["FNetTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[str] = ["FNetTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Tuple = [ "FNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FNetForMaskedLM", "FNetForMultipleChoice", "FNetForNextSentencePrediction", "FNetForPreTraining", "FNetForQuestionAnswering", "FNetForSequenceClassification", "FNetForTokenClassification", "FNetLayer", "FNetModel", "FNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
0
"""simple docstring""" import os import unittest from huggingface_hub.utils import are_progress_bars_disabled import transformers.models.bart.tokenization_bart from transformers import logging from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context from transformers.utils.logging import disable_progress_bar, enable_progress_bar class a ( unittest.TestCase ): def UpperCamelCase ( self : Dict ) -> Tuple: lowerCamelCase_ = logging.get_logger() # the current default level is logging.WARNING lowerCamelCase_ = logging.get_verbosity() logging.set_verbosity_error() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_warning() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_info() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) logging.set_verbosity_debug() self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() ) # restore to the original level logging.set_verbosity(a_ ) def UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]: lowerCamelCase_ = logging.get_verbosity() lowerCamelCase_ = logging.get_logger('transformers.models.bart.tokenization_bart' ) lowerCamelCase_ = 'Testing 1, 2, 3' # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`) if level_origin <= logging.WARNING: with CaptureLogger(a_ ) as cl: logger.warning(a_ ) self.assertEqual(cl.out , msg + '\n' ) # this is setting the level for all of `transformers.*` loggers logging.set_verbosity_error() # should not be able to log warnings with CaptureLogger(a_ ) as cl: logger.warning(a_ ) self.assertEqual(cl.out , '' ) # should be able to log warnings again logging.set_verbosity_warning() with CaptureLogger(a_ ) as cl: logger.warning(a_ ) self.assertEqual(cl.out , msg + '\n' ) # restore to the original level logging.set_verbosity(a_ ) @mockenv(TRANSFORMERS_VERBOSITY='error' ) def UpperCamelCase ( self : List[Any] ) -> Union[str, Any]: transformers.utils.logging._reset_library_root_logger() # this action activates the env var lowerCamelCase_ = logging.get_logger('transformers.models.bart.tokenization_bart' ) lowerCamelCase_ = os.getenv('TRANSFORMERS_VERBOSITY' , a_ ) lowerCamelCase_ = logging.log_levels[env_level_str] lowerCamelCase_ = logging.get_verbosity() self.assertEqual( a_ , a_ , F'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , ) # restore to the original level lowerCamelCase_ = '' transformers.utils.logging._reset_library_root_logger() @mockenv(TRANSFORMERS_VERBOSITY='super-error' ) def UpperCamelCase ( self : Any ) -> Union[str, Any]: transformers.utils.logging._reset_library_root_logger() lowerCamelCase_ = logging.logging.getLogger() with CaptureLogger(a_ ) as cl: # this action activates the env var logging.get_logger('transformers.models.bart.tokenization_bart' ) self.assertIn('Unknown option TRANSFORMERS_VERBOSITY=super-error' , cl.out ) # no need to restore as nothing was changed def UpperCamelCase ( self : int ) -> Tuple: transformers.utils.logging._reset_library_root_logger() lowerCamelCase_ = logging.get_logger('transformers.models.bart.tokenization_bart' ) lowerCamelCase_ = 'Testing 1, 2, 3' with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='1' ): # nothing should be logged as env var disables this method with CaptureLogger(a_ ) as cl: logger.warning_advice(a_ ) self.assertEqual(cl.out , '' ) with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='' ): # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset with CaptureLogger(a_ ) as cl: logger.warning_advice(a_ ) self.assertEqual(cl.out , msg + '\n' ) def lowerCamelCase__ ( ) -> Optional[Any]: disable_progress_bar() assert are_progress_bars_disabled() enable_progress_bar() assert not are_progress_bars_disabled()
549
def _a ( lowercase__ : int , lowercase__ : list ): '''simple docstring''' _enforce_args(lowercase__ , lowercase__ ) if n == 0: return 0 SCREAMING_SNAKE_CASE__ : str = float('-inf' ) for i in range(1 , n + 1 ): SCREAMING_SNAKE_CASE__ : int = max( lowercase__ , prices[i - 1] + naive_cut_rod_recursive(n - i , lowercase__ ) ) return max_revue def _a ( lowercase__ : int , lowercase__ : list ): '''simple docstring''' _enforce_args(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE__ : str = [float('-inf' ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(lowercase__ , lowercase__ , lowercase__ ) def _a ( lowercase__ : int , lowercase__ : list , lowercase__ : list ): '''simple docstring''' if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: SCREAMING_SNAKE_CASE__ : List[str] = float('-inf' ) for i in range(1 , n + 1 ): SCREAMING_SNAKE_CASE__ : Any = max( lowercase__ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , lowercase__ , lowercase__ ) , ) SCREAMING_SNAKE_CASE__ : Tuple = max_revenue return max_rev[n] def _a ( lowercase__ : int , lowercase__ : list ): '''simple docstring''' _enforce_args(lowercase__ , lowercase__ ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. SCREAMING_SNAKE_CASE__ : Optional[int] = [float('-inf' ) for _ in range(n + 1 )] SCREAMING_SNAKE_CASE__ : int = 0 for i in range(1 , n + 1 ): SCREAMING_SNAKE_CASE__ : Optional[Any] = max_rev[i] for j in range(1 , i + 1 ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = max(lowercase__ , prices[j - 1] + max_rev[i - j] ) SCREAMING_SNAKE_CASE__ : Dict = max_revenue_i return max_rev[n] def _a ( lowercase__ : int , lowercase__ : list ): '''simple docstring''' if n < 0: SCREAMING_SNAKE_CASE__ : Tuple = f'''n must be greater than or equal to 0. Got n = {n}''' raise ValueError(lowercase__ ) if n > len(lowercase__ ): SCREAMING_SNAKE_CASE__ : Tuple = ( 'Each integral piece of rod must have a corresponding price. ' f'''Got n = {n} but length of prices = {len(lowercase__ )}''' ) raise ValueError(lowercase__ ) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = [6, 10, 12, 15, 20, 23] SCREAMING_SNAKE_CASE__ : Optional[int] = len(lowercase__ ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. SCREAMING_SNAKE_CASE__ : Optional[Any] = 36 SCREAMING_SNAKE_CASE__ : Tuple = top_down_cut_rod(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = bottom_up_cut_rod(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE__ : List[str] = naive_cut_rod_recursive(lowercase__ , lowercase__ ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
85
0
"""simple docstring""" from dataclasses import dataclass from enum import Enum from typing import List, Optional, Union import numpy as np import PIL from PIL import Image from ...utils import BaseOutput, is_torch_available, is_transformers_available @dataclass class _SCREAMING_SNAKE_CASE( UpperCamelCase_ ): SCREAMING_SNAKE_CASE_ : Optional[int] = 42 SCREAMING_SNAKE_CASE_ : Any = 42 if is_transformers_available() and is_torch_available(): from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
498
import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model") SCREAMING_SNAKE_CASE__ : Optional[int] = get_tests_dir("fixtures/test_sentencepiece_bpe.model") SCREAMING_SNAKE_CASE__ : Any = "pt" if is_torch_available() else "tf" @require_sentencepiece @require_tokenizers class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = CamembertTokenizer lowercase_ = CamembertTokenizerFast lowercase_ = True lowercase_ = True def __lowercase( self : Tuple )-> str: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE__ : Dict = CamembertTokenizer(a_ ) tokenizer.save_pretrained(self.tmpdirname ) def __lowercase( self : Any )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = '<pad>' SCREAMING_SNAKE_CASE__ : int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ ) def __lowercase( self : Optional[Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>NOTUSED' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(a_ ) , 1004 ) def __lowercase( self : Union[str, Any] )-> Optional[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1005 ) def __lowercase( self : List[Any] )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = CamembertTokenizer(a_ ) tokenizer.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : int = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : str = 'I was born in 92000, and this is falsé.' SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = rust_tokenizer.encode(a_ ) self.assertListEqual(a_ , a_ ) SCREAMING_SNAKE_CASE__ : str = tokenizer.encode(a_ , add_special_tokens=a_ ) SCREAMING_SNAKE_CASE__ : List[str] = rust_tokenizer.encode(a_ , add_special_tokens=a_ ) self.assertListEqual(a_ , a_ ) # <unk> tokens are not the same for `rust` than for `slow`. # Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.convert_ids_to_tokens(a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = rust_tokenizer.tokenize(a_ ) self.assertListEqual(a_ , a_ ) def __lowercase( self : Union[str, Any] )-> str: """simple docstring""" if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE__ : Tuple = 'I was born in 92000, and this is falsé.' SCREAMING_SNAKE_CASE__ : str = tokenizer.tokenize(a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = rust_tokenizer.tokenize(a_ ) self.assertListEqual(a_ , a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.encode(a_ , add_special_tokens=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = rust_tokenizer.encode(a_ , add_special_tokens=a_ ) self.assertListEqual(a_ , a_ ) SCREAMING_SNAKE_CASE__ : int = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.encode(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = rust_tokenizer.encode(a_ ) self.assertListEqual(a_ , a_ ) @slow def __lowercase( self : List[str] )-> Dict: """simple docstring""" # fmt: off SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. SCREAMING_SNAKE_CASE__ : str = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=a_ , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=a_ , )
85
0
'''simple docstring''' import sys import tempfile import unittest import unittest.mock as mock from pathlib import Path from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test sys.path.append(str(Path(__file__).parent.parent / '''utils''')) from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402 A__ : str = get_tests_dir('''fixtures''') class snake_case__ ( unittest.TestCase ): def A_ ( self : Dict ) -> Optional[int]: '''simple docstring''' __snake_case : Union[str, Any] = mock.Mock() __snake_case : Union[str, Any] = 500 __snake_case : int = {} __snake_case : Union[str, Any] = HTTPError __snake_case : str = {} # Download this model to make sure it's in the cache. __snake_case : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' ) # Under the mock environment we get a 500 error when trying to reach the model. with mock.patch('requests.Session.request' , return_value=a_ ) as mock_head: __snake_case : int = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' ) # This check we did call the fake head request mock_head.assert_called() def A_ ( self : Tuple ) -> Optional[Any]: '''simple docstring''' __snake_case : List[str] = WavaVecaFeatureExtractor.from_pretrained( 'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' ) @is_staging_test class snake_case__ ( unittest.TestCase ): @classmethod def A_ ( cls : Optional[Any] ) -> Any: '''simple docstring''' __snake_case : List[Any] = TOKEN HfFolder.save_token(a_ ) @classmethod def A_ ( cls : str ) -> Any: '''simple docstring''' try: delete_repo(token=cls._token , repo_id='test-feature-extractor' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' ) except HTTPError: pass def A_ ( self : Dict ) -> Optional[Any]: '''simple docstring''' __snake_case : Any = WavaVecaFeatureExtractor.from_pretrained(a_ ) feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token ) __snake_case : str = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(a_ , getattr(a_ , a_ ) ) # Reset repo delete_repo(token=self._token , repo_id='test-feature-extractor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( a_ , repo_id='test-feature-extractor' , push_to_hub=a_ , use_auth_token=self._token ) __snake_case : Dict = WavaVecaFeatureExtractor.from_pretrained(f'''{USER}/test-feature-extractor''' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(a_ , getattr(a_ , a_ ) ) def A_ ( self : Any ) -> Dict: '''simple docstring''' __snake_case : Dict = WavaVecaFeatureExtractor.from_pretrained(a_ ) feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token ) __snake_case : int = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(a_ , getattr(a_ , a_ ) ) # Reset repo delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor' ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: feature_extractor.save_pretrained( a_ , repo_id='valid_org/test-feature-extractor-org' , push_to_hub=a_ , use_auth_token=self._token ) __snake_case : str = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' ) for k, v in feature_extractor.__dict__.items(): self.assertEqual(a_ , getattr(a_ , a_ ) ) def A_ ( self : str ) -> int: '''simple docstring''' CustomFeatureExtractor.register_for_auto_class() __snake_case : Any = CustomFeatureExtractor.from_pretrained(a_ ) feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token ) # This has added the proper auto_map field to the config self.assertDictEqual( feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , ) __snake_case : Tuple = AutoFeatureExtractor.from_pretrained( f'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=a_ ) # Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor' )
286
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable SCREAMING_SNAKE_CASE__ : Any = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[str] = ["DPTFeatureExtractor"] SCREAMING_SNAKE_CASE__ : Tuple = ["DPTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[Any] = [ "DPT_PRETRAINED_MODEL_ARCHIVE_LIST", "DPTForDepthEstimation", "DPTForSemanticSegmentation", "DPTModel", "DPTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
0
'''simple docstring''' from __future__ import annotations import bisect def UpperCamelCase_ ( A__ : list[int] , A__ : int , A__ : int = 0 , A__ : int = -1 ): '''simple docstring''' if hi < 0: lowerCAmelCase_ : str = len(lowercase__ ) while lo < hi: lowerCAmelCase_ : Optional[Any] = lo + (hi - lo) // 2 if sorted_collection[mid] < item: lowerCAmelCase_ : Tuple = mid + 1 else: lowerCAmelCase_ : str = mid return lo def UpperCamelCase_ ( A__ : list[int] , A__ : int , A__ : int = 0 , A__ : int = -1 ): '''simple docstring''' if hi < 0: lowerCAmelCase_ : str = len(lowercase__ ) while lo < hi: lowerCAmelCase_ : Any = lo + (hi - lo) // 2 if sorted_collection[mid] <= item: lowerCAmelCase_ : int = mid + 1 else: lowerCAmelCase_ : List[str] = mid return lo def UpperCamelCase_ ( A__ : list[int] , A__ : int , A__ : int = 0 , A__ : int = -1 ): '''simple docstring''' sorted_collection.insert(bisect_left(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) , lowercase__ ) def UpperCamelCase_ ( A__ : list[int] , A__ : int , A__ : int = 0 , A__ : int = -1 ): '''simple docstring''' sorted_collection.insert(bisect_right(lowercase__ , lowercase__ , lowercase__ , lowercase__ ) , lowercase__ ) def UpperCamelCase_ ( A__ : list[int] , A__ : int ): '''simple docstring''' lowerCAmelCase_ : str = 0 lowerCAmelCase_ : str = len(lowercase__ ) - 1 while left <= right: lowerCAmelCase_ : Tuple = left + (right - left) // 2 lowerCAmelCase_ : List[str] = sorted_collection[midpoint] if current_item == item: return midpoint elif item < current_item: lowerCAmelCase_ : List[str] = midpoint - 1 else: lowerCAmelCase_ : Tuple = midpoint + 1 return None def UpperCamelCase_ ( A__ : list[int] , A__ : int ): '''simple docstring''' lowerCAmelCase_ : Union[str, Any] = bisect.bisect_left(lowercase__ , lowercase__ ) if index != len(lowercase__ ) and sorted_collection[index] == item: return index return None def UpperCamelCase_ ( A__ : list[int] , A__ : int , A__ : int , A__ : int ): '''simple docstring''' if right < left: return None lowerCAmelCase_ : Dict = left + (right - left) // 2 if sorted_collection[midpoint] == item: return midpoint elif sorted_collection[midpoint] > item: return binary_search_by_recursion(lowercase__ , lowercase__ , lowercase__ , midpoint - 1 ) else: return binary_search_by_recursion(lowercase__ , lowercase__ , midpoint + 1 , lowercase__ ) if __name__ == "__main__": __A : int = input("Enter numbers separated by comma:\n").strip() __A : Union[str, Any] = sorted(int(item) for item in user_input.split(",")) __A : Optional[Any] = int(input("Enter a single number to be found in the list:\n")) __A : Optional[Any] = binary_search(collection, target) if result is None: print(F'''{target} was not found in {collection}.''') else: print(F'''{target} was found at position {result} in {collection}.''')
275
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__) class snake_case ( UpperCamelCase_ ): lowercase_ = ['pixel_values'] def __init__( self : List[Any] , a_ : bool = True , a_ : Union[int, float] = 1 / 255 , a_ : bool = True , a_ : int = 8 , **a_ : Union[str, Any] , )-> None: """simple docstring""" super().__init__(**a_ ) SCREAMING_SNAKE_CASE__ : List[str] = do_rescale SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_factor SCREAMING_SNAKE_CASE__ : Dict = do_pad SCREAMING_SNAKE_CASE__ : Any = pad_size def __lowercase( self : str , a_ : np.ndarray , a_ : float , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : str )-> np.ndarray: """simple docstring""" return rescale(a_ , scale=a_ , data_format=a_ , **a_ ) def __lowercase( self : Any , a_ : np.ndarray , a_ : int , a_ : Optional[Union[str, ChannelDimension]] = None )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = get_image_size(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = (old_height // size + 1) * size - old_height SCREAMING_SNAKE_CASE__ : List[Any] = (old_width // size + 1) * size - old_width return pad(a_ , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=a_ ) def __lowercase( self : Tuple , a_ : ImageInput , a_ : Optional[bool] = None , a_ : Optional[float] = None , a_ : Optional[bool] = None , a_ : Optional[int] = None , a_ : Optional[Union[str, TensorType]] = None , a_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a_ : Dict , )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE__ : List[str] = do_pad if do_pad is not None else self.do_pad SCREAMING_SNAKE_CASE__ : List[str] = pad_size if pad_size is not None else self.pad_size SCREAMING_SNAKE_CASE__ : Tuple = make_list_of_images(a_ ) if not valid_images(a_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE__ : List[str] = [to_numpy_array(a_ ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.rescale(image=a_ , scale=a_ ) for image in images] if do_pad: SCREAMING_SNAKE_CASE__ : str = [self.pad(a_ , size=a_ ) for image in images] SCREAMING_SNAKE_CASE__ : List[str] = [to_channel_dimension_format(a_ , a_ ) for image in images] SCREAMING_SNAKE_CASE__ : Tuple = {'pixel_values': images} return BatchFeature(data=a_ , tensor_type=a_ )
85
0
def _snake_case( SCREAMING_SNAKE_CASE__ ) -> Optional[int]: lowercase : Any = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def _snake_case( SCREAMING_SNAKE_CASE__ = 5_000 ) -> int: lowercase : Any = [(i * (3 * i - 1)) // 2 for i in range(1 , lowercase__ )] for i, pentagonal_i in enumerate(lowercase__ ): for j in range(lowercase__ , len(lowercase__ ) ): lowercase : List[str] = pentagonal_nums[j] lowercase : List[str] = pentagonal_i + pentagonal_j lowercase : Dict = pentagonal_j - pentagonal_i if is_pentagonal(lowercase__ ) and is_pentagonal(lowercase__ ): return b return -1 if __name__ == "__main__": print(F'''{solution() = }''')
336
from pathlib import Path import numpy as np from PIL import Image def _a ( lowercase__ : np.ndarray ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 0.1140 * b def _a ( lowercase__ : np.ndarray ): '''simple docstring''' return (gray > 1_27) & (gray <= 2_55) def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = np.zeros_like(lowercase__ ) SCREAMING_SNAKE_CASE__ : str = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image SCREAMING_SNAKE_CASE__ : Optional[Any] = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): SCREAMING_SNAKE_CASE__ : List[Any] = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() SCREAMING_SNAKE_CASE__ : List[str] = int(summation > 0 ) return output if __name__ == "__main__": # read original image SCREAMING_SNAKE_CASE__ : int = Path(__file__).resolve().parent / "image_data" / "lena.jpg" SCREAMING_SNAKE_CASE__ : int = np.array(Image.open(lena_path)) # kernel to be applied SCREAMING_SNAKE_CASE__ : str = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) SCREAMING_SNAKE_CASE__ : Optional[int] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image SCREAMING_SNAKE_CASE__ : Optional[int] = Image.fromarray(output).convert("RGB") pil_img.save("result_dilation.png")
85
0
"""simple docstring""" from __future__ import annotations def lowercase (_snake_case ) -> Dict: '''simple docstring''' __UpperCamelCase = [True] * limit __UpperCamelCase = False __UpperCamelCase = False __UpperCamelCase = True for i in range(3 ,int(limit**0.5 + 1 ) ,2 ): __UpperCamelCase = i * 2 while index < limit: __UpperCamelCase = False __UpperCamelCase = index + i __UpperCamelCase = [2] for i in range(3 ,lowercase__ ,2 ): if is_prime[i]: primes.append(lowercase__ ) return primes def lowercase (_snake_case = 1000000 ) -> Optional[Any]: '''simple docstring''' __UpperCamelCase = prime_sieve(lowercase__ ) __UpperCamelCase = 0 __UpperCamelCase = 0 for i in range(len(lowercase__ ) ): for j in range(i + length ,len(lowercase__ ) ): __UpperCamelCase = sum(primes[i:j] ) if sol >= ceiling: break if sol in primes: __UpperCamelCase = j - i __UpperCamelCase = sol return largest if __name__ == "__main__": print(f"""{solution() = }""")
505
def _a ( lowercase__ : int = 60_08_51_47_51_43 ): '''simple docstring''' try: SCREAMING_SNAKE_CASE__ : Dict = int(lowercase__ ) except (TypeError, ValueError): raise TypeError('Parameter n must be int or castable to int.' ) if n <= 0: raise ValueError('Parameter n must be greater than or equal to one.' ) SCREAMING_SNAKE_CASE__ : int = 2 SCREAMING_SNAKE_CASE__ : int = 0 if n == 2: return 2 while n > 2: while n % i != 0: i += 1 SCREAMING_SNAKE_CASE__ : str = i while n % i == 0: SCREAMING_SNAKE_CASE__ : List[Any] = n // i i += 1 return int(lowercase__ ) if __name__ == "__main__": print(F"""{solution() = }""")
85
0
from typing import Any import numpy as np def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ): return np.array_equal(lowercase__ ,matrix.conjugate().T ) def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ,lowerCAmelCase__ ): lowerCamelCase_ : Tuple = v.conjugate().T lowerCamelCase_ : Any = v_star.dot(lowercase__ ) assert isinstance(lowercase__ ,np.ndarray ) return (v_star_dot.dot(lowercase__ )) / (v_star.dot(lowercase__ )) def _SCREAMING_SNAKE_CASE ( ): lowerCamelCase_ : Tuple = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) lowerCamelCase_ : Optional[int] = np.array([[1], [2], [3]] ) assert is_hermitian(lowercase__ ), F"{a} is not hermitian." print(rayleigh_quotient(lowercase__ ,lowercase__ ) ) lowerCamelCase_ : Any = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(lowercase__ ), F"{a} is not hermitian." assert rayleigh_quotient(lowercase__ ,lowercase__ ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
364
def _a ( lowercase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = int(lowercase__ ) if n_element < 1: SCREAMING_SNAKE_CASE__ : Tuple = ValueError('a should be a positive number' ) raise my_error SCREAMING_SNAKE_CASE__ : Any = [1] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = (0, 0, 0) SCREAMING_SNAKE_CASE__ : Any = 1 while index < n_element: while hamming_list[i] * 2 <= hamming_list[-1]: i += 1 while hamming_list[j] * 3 <= hamming_list[-1]: j += 1 while hamming_list[k] * 5 <= hamming_list[-1]: k += 1 hamming_list.append( min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) ) index += 1 return hamming_list if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Any = input("Enter the last number (nth term) of the Hamming Number Series: ") print("Formula of Hamming Number Series => 2^i * 3^j * 5^k") SCREAMING_SNAKE_CASE__ : int = hamming(int(n)) print("-----------------------------------------------------") print(F"""The list with nth numbers is: {hamming_numbers}""") print("-----------------------------------------------------")
85
0
'''simple docstring''' def lowerCamelCase__ ( __lowercase ): snake_case : Optional[Any] = [0] * len(lowercase__ ) for i in range(1 , len(lowercase__ ) ): # use last results for better performance - dynamic programming snake_case : Dict = prefix_result[i - 1] while j > 0 and input_string[i] != input_string[j]: snake_case : Union[str, Any] = prefix_result[j - 1] if input_string[i] == input_string[j]: j += 1 snake_case : str = j return prefix_result def lowerCamelCase__ ( __lowercase ): return max(prefix_function(lowercase__ ) ) if __name__ == "__main__": import doctest doctest.testmod()
116
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ : Union[str, Any] = { "configuration_nllb_moe": [ "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP", "NllbMoeConfig", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : str = [ "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST", "NllbMoeForConditionalGeneration", "NllbMoeModel", "NllbMoePreTrainedModel", "NllbMoeTop2Router", "NllbMoeSparseMLP", ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
0
from typing import TYPE_CHECKING from ...utils import _LazyModule __magic_name__ : List[str] = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]} if TYPE_CHECKING: from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM else: import sys __magic_name__ : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
615
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) SCREAMING_SNAKE_CASE__ : List[str] = { "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[int] = [ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys SCREAMING_SNAKE_CASE__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
0
"""simple docstring""" import os from argparse import ArgumentParser from typing import List import torch.utils.data from datasets import Dataset, IterableDataset from datasets.distributed import split_dataset_by_node a = 4 a = 3 class lowercase_ ( UpperCamelCase_ ): '''simple docstring''' pass def _snake_case ( _snake_case : List[str] ) -> List[Any]: '''simple docstring''' for shard in shards: for i in range(lowercase__ ): yield {"i": i, "shard": shard} def _snake_case ( ) -> Optional[int]: '''simple docstring''' _A = int(os.environ['RANK'] ) _A = int(os.environ['WORLD_SIZE'] ) _A = ArgumentParser() parser.add_argument('--streaming' , type=lowercase__ ) parser.add_argument('--local_rank' , type=lowercase__ ) parser.add_argument('--num_workers' , type=lowercase__ , default=0 ) _A = parser.parse_args() _A = args.streaming _A = args.num_workers _A = {'shards': [F'''shard_{shard_idx}''' for shard_idx in range(lowercase__ )]} _A = IterableDataset.from_generator(lowercase__ , gen_kwargs=lowercase__ ) if not streaming: _A = Dataset.from_list(list(lowercase__ ) ) _A = split_dataset_by_node(lowercase__ , rank=lowercase__ , world_size=lowercase__ ) _A = torch.utils.data.DataLoader(lowercase__ , num_workers=lowercase__ ) _A = NUM_SHARDS * NUM_ITEMS_PER_SHARD _A = full_size // world_size expected_local_size += int(rank < (full_size % world_size) ) _A = sum(1 for _ in dataloader ) if local_size != expected_local_size: raise FailedTestError(F'''local_size {local_size} != expected_local_size {expected_local_size}''' ) if __name__ == "__main__": main()
7
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs SCREAMING_SNAKE_CASE__ : int = imread(r"digital_image_processing/image_data/lena_small.jpg") SCREAMING_SNAKE_CASE__ : List[Any] = cvtColor(img, COLOR_BGR2GRAY) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = cn.convert_to_negative(lowercase__ ) # assert negative_img array for at least one True assert negative_img.any() def _a ( ): '''simple docstring''' with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img: # Work around assertion for response assert str(cc.change_contrast(lowercase__ , 1_10 ) ).startswith( '<PIL.Image.Image image mode=RGB size=100x100 at' ) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = imread('digital_image_processing/image_data/lena_small.jpg' , 0 ) # assert ambiguous array for all == True assert canny_img.all() SCREAMING_SNAKE_CASE__ : List[str] = canny.canny(lowercase__ ) # assert canny array for at least one True assert canny_array.any() def _a ( ): '''simple docstring''' assert gg.gaussian_filter(lowercase__ , 5 , sigma=0.9 ).all() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) SCREAMING_SNAKE_CASE__ : Tuple = conv.img_convolve(lowercase__ , lowercase__ ).astype(lowercase__ ) assert res.any() def _a ( ): '''simple docstring''' assert med.median_filter(lowercase__ , 3 ).any() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = sob.sobel_filter(lowercase__ ) assert grad.any() and theta.any() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = sp.make_sepia(lowercase__ , 20 ) assert sepia.all() def _a ( lowercase__ : str = "digital_image_processing/image_data/lena_small.jpg" ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = bs.Burkes(imread(lowercase__ , 1 ) , 1_20 ) burkes.process() assert burkes.output_img.any() def _a ( lowercase__ : str = "digital_image_processing/image_data/lena_small.jpg" , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = rs.NearestNeighbour(imread(lowercase__ , 1 ) , 4_00 , 2_00 ) nn.process() assert nn.output.any() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = 'digital_image_processing/image_data/lena.jpg' # Reading the image and converting it to grayscale. SCREAMING_SNAKE_CASE__ : Dict = imread(lowercase__ , 0 ) # Test for get_neighbors_pixel function() return not None SCREAMING_SNAKE_CASE__ : str = 0 SCREAMING_SNAKE_CASE__ : Dict = 0 SCREAMING_SNAKE_CASE__ : Any = image[x_coordinate][y_coordinate] SCREAMING_SNAKE_CASE__ : List[Any] = lbp.get_neighbors_pixel( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image SCREAMING_SNAKE_CASE__ : Optional[Any] = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): SCREAMING_SNAKE_CASE__ : str = lbp.local_binary_value(lowercase__ , lowercase__ , lowercase__ ) assert lbp_image.any()
85
0
from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def _a ( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Any ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = [] for part_id in partition_order: SCREAMING_SNAKE_CASE__ : Optional[int] = df.where(f'''SPARK_PARTITION_ID() = {part_id}''' ).collect() for row_idx, row in enumerate(lowercase__ ): expected_row_ids_and_row_dicts.append((f'''{part_id}_{row_idx}''', row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def _a ( ) -> Optional[Any]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() SCREAMING_SNAKE_CASE__ : Any = spark.range(1_00 ).repartition(1 ) SCREAMING_SNAKE_CASE__ : Optional[int] = Spark(lowercase__ ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def _a ( ) -> Optional[int]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Tuple = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() SCREAMING_SNAKE_CASE__ : int = spark.range(10 ).repartition(2 ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [1, 0] SCREAMING_SNAKE_CASE__ : str = _generate_iterable_examples(lowercase__ , lowercase__ ) # Reverse the partitions. SCREAMING_SNAKE_CASE__ : List[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase__ , lowercase__ ) for i, (row_id, row_dict) in enumerate(generate_fn() ): SCREAMING_SNAKE_CASE__ : Tuple = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def _a ( ) -> Tuple: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() SCREAMING_SNAKE_CASE__ : str = spark.range(10 ).repartition(1 ) SCREAMING_SNAKE_CASE__ : int = SparkExamplesIterable(lowercase__ ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(lowercase__ ): assert row_id == f'''0_{i}''' assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def _a ( ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() SCREAMING_SNAKE_CASE__ : Dict = spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. with patch("numpy.random.Generator" ) as generator_mock: SCREAMING_SNAKE_CASE__ : str = lambda SCREAMING_SNAKE_CASE__ : x.reverse() SCREAMING_SNAKE_CASE__ : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase__ , [2, 1, 0] ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = SparkExamplesIterable(lowercase__ ).shuffle_data_sources(lowercase__ ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(lowercase__ ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def _a ( ) -> Any: '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() SCREAMING_SNAKE_CASE__ : Union[str, Any] = spark.range(20 ).repartition(4 ) # Partitions 0 and 2 SCREAMING_SNAKE_CASE__ : Tuple = SparkExamplesIterable(lowercase__ ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 SCREAMING_SNAKE_CASE__ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase__ , [0, 2] ) for i, (row_id, row_dict) in enumerate(lowercase__ ): SCREAMING_SNAKE_CASE__ : str = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 SCREAMING_SNAKE_CASE__ : Optional[int] = SparkExamplesIterable(lowercase__ ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 SCREAMING_SNAKE_CASE__ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase__ , [1, 3] ) for i, (row_id, row_dict) in enumerate(lowercase__ ): SCREAMING_SNAKE_CASE__ : str = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def _a ( ) -> Union[str, Any]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : int = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate() SCREAMING_SNAKE_CASE__ : Any = spark.range(1_00 ).repartition(1 ) SCREAMING_SNAKE_CASE__ : str = Spark(lowercase__ ) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 1_00
663
import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu SCREAMING_SNAKE_CASE__ : Any = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json" with io.open(filename, "r", encoding="utf-8") as f: SCREAMING_SNAKE_CASE__ : Tuple = json.load(f) @require_torch class snake_case ( unittest.TestCase ): def __lowercase( self : List[str] , a_ : Any )-> str: """simple docstring""" return FSMTTokenizer.from_pretrained(a_ ) def __lowercase( self : int , a_ : Union[str, Any] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = FSMTForConditionalGeneration.from_pretrained(a_ ).to(a_ ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ['en-ru', 26.0], ['ru-en', 22.0], ['en-de', 22.0], ['de-en', 29.0], ] ) @slow def __lowercase( self : int , a_ : Optional[int] , a_ : str )-> List[str]: """simple docstring""" # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality SCREAMING_SNAKE_CASE__ : Any = F'''facebook/wmt19-{pair}''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_tokenizer(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_model(a_ ) SCREAMING_SNAKE_CASE__ : int = bleu_data[pair]['src'] SCREAMING_SNAKE_CASE__ : Optional[int] = bleu_data[pair]['tgt'] SCREAMING_SNAKE_CASE__ : Any = tokenizer(a_ , return_tensors='pt' , truncation=a_ , padding='longest' ).to(a_ ) SCREAMING_SNAKE_CASE__ : int = model.generate( input_ids=batch.input_ids , num_beams=8 , ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.batch_decode( a_ , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = calculate_bleu(a_ , a_ ) print(a_ ) self.assertGreaterEqual(scores['bleu'] , a_ )
85
0
"""simple docstring""" import os import tempfile import unittest from transformers import FlaubertConfig, is_torch_available from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class a ( UpperCamelCase_ ): def __init__( self : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[Any]=13 , __SCREAMING_SNAKE_CASE : List[Any]=7 , __SCREAMING_SNAKE_CASE : Optional[int]=True , __SCREAMING_SNAKE_CASE : Any=True , __SCREAMING_SNAKE_CASE : Dict=True , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Tuple=False , __SCREAMING_SNAKE_CASE : Optional[int]=False , __SCREAMING_SNAKE_CASE : Union[str, Any]=False , __SCREAMING_SNAKE_CASE : int=2 , __SCREAMING_SNAKE_CASE : Dict=99 , __SCREAMING_SNAKE_CASE : Union[str, Any]=0 , __SCREAMING_SNAKE_CASE : str=32 , __SCREAMING_SNAKE_CASE : Union[str, Any]=5 , __SCREAMING_SNAKE_CASE : Any=4 , __SCREAMING_SNAKE_CASE : int=0.1 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : List[str]=512 , __SCREAMING_SNAKE_CASE : Optional[int]=12 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : List[Any]=0.02 , __SCREAMING_SNAKE_CASE : Tuple=3 , __SCREAMING_SNAKE_CASE : int=4 , __SCREAMING_SNAKE_CASE : Optional[int]="last" , __SCREAMING_SNAKE_CASE : List[str]=None , __SCREAMING_SNAKE_CASE : List[Any]=None , ) -> Optional[int]: lowerCamelCase_ = parent lowerCamelCase_ = batch_size lowerCamelCase_ = seq_length lowerCamelCase_ = is_training lowerCamelCase_ = use_input_lengths lowerCamelCase_ = use_token_type_ids lowerCamelCase_ = use_labels lowerCamelCase_ = gelu_activation lowerCamelCase_ = sinusoidal_embeddings lowerCamelCase_ = causal lowerCamelCase_ = asm lowerCamelCase_ = n_langs lowerCamelCase_ = vocab_size lowerCamelCase_ = n_special lowerCamelCase_ = hidden_size lowerCamelCase_ = num_hidden_layers lowerCamelCase_ = num_attention_heads lowerCamelCase_ = hidden_dropout_prob lowerCamelCase_ = attention_probs_dropout_prob lowerCamelCase_ = max_position_embeddings lowerCamelCase_ = type_vocab_size lowerCamelCase_ = type_sequence_label_size lowerCamelCase_ = initializer_range lowerCamelCase_ = num_labels lowerCamelCase_ = num_choices lowerCamelCase_ = summary_type lowerCamelCase_ = use_proj lowerCamelCase_ = scope def UpperCamelCase ( self : Optional[int] ) -> Optional[int]: lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] ) lowerCamelCase_ = None if self.use_input_lengths: lowerCamelCase_ = ( ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2 ) # small variation of seq_length lowerCamelCase_ = None if self.use_token_type_ids: lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.n_langs ) lowerCamelCase_ = None lowerCamelCase_ = None lowerCamelCase_ = None if self.use_labels: lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size ) lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) lowerCamelCase_ = ids_tensor([self.batch_size] , 2 ).float() lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices ) lowerCamelCase_ = self.get_config() return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) def UpperCamelCase ( self : int ) -> Any: return FlaubertConfig( vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , ) def UpperCamelCase ( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Any , ) -> Union[str, Any]: lowerCamelCase_ = FlaubertModel(config=a_ ) model.to(a_ ) model.eval() lowerCamelCase_ = model(a_ , lengths=a_ , langs=a_ ) lowerCamelCase_ = model(a_ , langs=a_ ) lowerCamelCase_ = model(a_ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def UpperCamelCase ( self : str , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str , ) -> Optional[Any]: lowerCamelCase_ = FlaubertWithLMHeadModel(a_ ) model.to(a_ ) model.eval() lowerCamelCase_ = model(a_ , token_type_ids=a_ , labels=a_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def UpperCamelCase ( self : Any , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Union[str, Any] , ) -> Optional[int]: lowerCamelCase_ = FlaubertForQuestionAnsweringSimple(a_ ) model.to(a_ ) model.eval() lowerCamelCase_ = model(a_ ) lowerCamelCase_ = model(a_ , start_positions=a_ , end_positions=a_ ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def UpperCamelCase ( self : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : int , ) -> Tuple: lowerCamelCase_ = FlaubertForQuestionAnswering(a_ ) model.to(a_ ) model.eval() lowerCamelCase_ = model(a_ ) lowerCamelCase_ = model( a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , p_mask=a_ , ) lowerCamelCase_ = model( a_ , start_positions=a_ , end_positions=a_ , cls_index=a_ , is_impossible=a_ , ) (lowerCamelCase_ ) = result_with_labels.to_tuple() lowerCamelCase_ = model(a_ , start_positions=a_ , end_positions=a_ ) (lowerCamelCase_ ) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape , () ) self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) ) self.parent.assertEqual( result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) ) def UpperCamelCase ( self : Dict , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Dict , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , ) -> Union[str, Any]: lowerCamelCase_ = FlaubertForSequenceClassification(a_ ) model.to(a_ ) model.eval() lowerCamelCase_ = model(a_ ) lowerCamelCase_ = model(a_ , labels=a_ ) self.parent.assertEqual(result.loss.shape , () ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def UpperCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : List[str] , ) -> Tuple: lowerCamelCase_ = self.num_labels lowerCamelCase_ = FlaubertForTokenClassification(a_ ) model.to(a_ ) model.eval() lowerCamelCase_ = model(a_ , attention_mask=a_ , labels=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def UpperCamelCase ( self : Optional[int] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : Any , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : Optional[Any] , __SCREAMING_SNAKE_CASE : Any , ) -> Dict: lowerCamelCase_ = self.num_choices lowerCamelCase_ = FlaubertForMultipleChoice(config=a_ ) model.to(a_ ) model.eval() lowerCamelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() lowerCamelCase_ = model( a_ , attention_mask=a_ , token_type_ids=a_ , labels=a_ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]: lowerCamelCase_ = self.prepare_config_and_inputs() ( lowerCamelCase_ ) = config_and_inputs lowerCamelCase_ = { 'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths, 'attention_mask': input_mask, } return config, inputs_dict @require_torch class a ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): SCREAMING_SNAKE_CASE : Optional[int] = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE : List[Any] = ( { """feature-extraction""": FlaubertModel, """fill-mask""": FlaubertWithLMHeadModel, """question-answering""": FlaubertForQuestionAnsweringSimple, """text-classification""": FlaubertForSequenceClassification, """token-classification""": FlaubertForTokenClassification, """zero-shot""": FlaubertForSequenceClassification, } if is_torch_available() else {} ) def UpperCamelCase ( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : str , __SCREAMING_SNAKE_CASE : Union[str, Any] ) -> int: if ( pipeline_test_casse_name == "QAPipelineTests" and tokenizer_name is not None and not tokenizer_name.endswith('Fast' ) ): # `QAPipelineTests` fails for a few models when the slower tokenizer are used. # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework) # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer return True return False def UpperCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : List[Any] , __SCREAMING_SNAKE_CASE : int , __SCREAMING_SNAKE_CASE : List[str]=False ) -> Dict: lowerCamelCase_ = super()._prepare_for_class(a_ , a_ , return_labels=a_ ) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": lowerCamelCase_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a_ ) lowerCamelCase_ = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=a_ ) return inputs_dict def UpperCamelCase ( self : List[str] ) -> List[Any]: lowerCamelCase_ = FlaubertModelTester(self ) lowerCamelCase_ = ConfigTester(self , config_class=a_ , emb_dim=37 ) def UpperCamelCase ( self : Dict ) -> Any: self.config_tester.run_common_tests() def UpperCamelCase ( self : Tuple ) -> Optional[Any]: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*a_ ) def UpperCamelCase ( self : Optional[Any] ) -> Dict: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*a_ ) def UpperCamelCase ( self : Union[str, Any] ) -> int: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*a_ ) def UpperCamelCase ( self : Union[str, Any] ) -> List[Any]: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*a_ ) def UpperCamelCase ( self : List[Any] ) -> int: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*a_ ) def UpperCamelCase ( self : Dict ) -> Tuple: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*a_ ) def UpperCamelCase ( self : Tuple ) -> str: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*a_ ) @slow def UpperCamelCase ( self : int ) -> List[Any]: for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowerCamelCase_ = FlaubertModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) @slow @require_torch_gpu def UpperCamelCase ( self : str ) -> Optional[int]: lowerCamelCase_ = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: # FlauBertForMultipleChoice behaves incorrectly in JIT environments. if model_class == FlaubertForMultipleChoice: return lowerCamelCase_ = True lowerCamelCase_ = model_class(config=a_ ) lowerCamelCase_ = self._prepare_for_class(a_ , a_ ) lowerCamelCase_ = torch.jit.trace( a_ , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) ) with tempfile.TemporaryDirectory() as tmp: torch.jit.save(a_ , os.path.join(a_ , 'traced_model.pt' ) ) lowerCamelCase_ = torch.jit.load(os.path.join(a_ , 'traced_model.pt' ) , map_location=a_ ) loaded(inputs_dict['input_ids'].to(a_ ) , inputs_dict['attention_mask'].to(a_ ) ) @require_torch class a ( unittest.TestCase ): @slow def UpperCamelCase ( self : Union[str, Any] ) -> str: lowerCamelCase_ = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased' ) lowerCamelCase_ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] ) with torch.no_grad(): lowerCamelCase_ = model(a_ )[0] lowerCamelCase_ = torch.Size((1, 11, 768) ) self.assertEqual(output.shape , a_ ) lowerCamelCase_ = torch.tensor( [[[-2.6_251, -1.4_298, -0.0_227], [-2.8_510, -1.6_387, 0.2_258], [-2.8_114, -1.1_832, -0.3_066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3] , a_ , atol=1e-4 ) )
549
import os import pytest from attr import dataclass SCREAMING_SNAKE_CASE__ : int = "us-east-1" # defaults region @dataclass class snake_case : lowercase_ = 42 lowercase_ = 'arn:aws:iam::558105141721:role/sagemaker_execution_role' lowercase_ = { 'task_name': 'mnli', 'per_device_train_batch_size': 16, 'per_device_eval_batch_size': 16, 'do_train': True, 'do_eval': True, 'do_predict': True, 'output_dir': '/opt/ml/model', 'overwrite_output_dir': True, 'max_steps': 500, 'save_steps': 5_500, } lowercase_ = {**hyperparameters, 'max_steps': 1_000} @property def __lowercase( self : List[str] )-> str: """simple docstring""" if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def __lowercase( self : Union[str, Any] )-> str: """simple docstring""" return F'''{self.framework}-transfromers-test''' @property def __lowercase( self : int )-> str: """simple docstring""" return F'''./tests/sagemaker/scripts/{self.framework}''' @property def __lowercase( self : Tuple )-> str: """simple docstring""" if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope='class' ) def _a ( lowercase__ : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
85
0
"""simple docstring""" import argparse import json import os import fairseq import torch from fairseq.data import Dictionary from transformers import ( WavaVecaConfig, WavaVecaCTCTokenizer, WavaVecaFeatureExtractor, WavaVecaForCTC, WavaVecaForPreTraining, WavaVecaProcessor, logging, ) from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification logging.set_verbosity_info() lowerCamelCase_ = logging.get_logger(__name__) lowerCamelCase_ = { "post_extract_proj": "feature_projection.projection", "encoder.pos_conv.0": "encoder.pos_conv_embed.conv", "self_attn.k_proj": "encoder.layers.*.attention.k_proj", "self_attn.v_proj": "encoder.layers.*.attention.v_proj", "self_attn.q_proj": "encoder.layers.*.attention.q_proj", "self_attn.out_proj": "encoder.layers.*.attention.out_proj", "self_attn_layer_norm": "encoder.layers.*.layer_norm", "fc1": "encoder.layers.*.feed_forward.intermediate_dense", "fc2": "encoder.layers.*.feed_forward.output_dense", "final_layer_norm": "encoder.layers.*.final_layer_norm", "encoder.layer_norm": "encoder.layer_norm", "adapter_layer": "encoder.layers.*.adapter_layer", "w2v_model.layer_norm": "feature_projection.layer_norm", "quantizer.weight_proj": "quantizer.weight_proj", "quantizer.vars": "quantizer.codevectors", "project_q": "project_q", "final_proj": "project_hid", "w2v_encoder.proj": "lm_head", "mask_emb": "masked_spec_embed", "pooling_layer.linear": "projector", "pooling_layer.projection": "classifier", } lowerCamelCase_ = [ "lm_head", "quantizer.weight_proj", "quantizer.codevectors", "project_q", "project_hid", "projector", "classifier", ] def __lowerCamelCase ( a_ : Any ) -> List[str]: __SCREAMING_SNAKE_CASE :List[str] = {} with open(lowercase__ , '''r''' ) as file: for line_number, line in enumerate(lowercase__ ): __SCREAMING_SNAKE_CASE :Any = line.strip() if line: __SCREAMING_SNAKE_CASE :Optional[Any] = line.split() __SCREAMING_SNAKE_CASE :Optional[Any] = line_number __SCREAMING_SNAKE_CASE :Optional[int] = words[0] __SCREAMING_SNAKE_CASE :str = value return result def __lowerCamelCase ( a_ : Dict , a_ : Tuple , a_ : str , a_ : Union[str, Any] , a_ : int ) -> Optional[int]: for attribute in key.split('''.''' ): __SCREAMING_SNAKE_CASE :Tuple = getattr(lowercase__ , lowercase__ ) __SCREAMING_SNAKE_CASE :List[str] = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(lowercase__ ): __SCREAMING_SNAKE_CASE :Optional[Any] = PARAM_MAPPING[full_name.split('''.''' )[-1]] __SCREAMING_SNAKE_CASE :Tuple = 'param' if weight_type is not None and weight_type != "param": __SCREAMING_SNAKE_CASE :Union[str, Any] = getattr(lowercase__ , lowercase__ ).shape elif weight_type is not None and weight_type == "param": __SCREAMING_SNAKE_CASE :str = hf_pointer for attribute in hf_param_name.split('''.''' ): __SCREAMING_SNAKE_CASE :Tuple = getattr(lowercase__ , lowercase__ ) __SCREAMING_SNAKE_CASE :Optional[int] = shape_pointer.shape # let's reduce dimension __SCREAMING_SNAKE_CASE :Optional[Any] = value[0] else: __SCREAMING_SNAKE_CASE :Any = hf_pointer.shape if hf_shape != value.shape: raise ValueError( f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be''' f''' {value.shape} for {full_name}''' ) if weight_type == "weight": __SCREAMING_SNAKE_CASE :Optional[Any] = value elif weight_type == "weight_g": __SCREAMING_SNAKE_CASE :Dict = value elif weight_type == "weight_v": __SCREAMING_SNAKE_CASE :Any = value elif weight_type == "bias": __SCREAMING_SNAKE_CASE :int = value elif weight_type == "param": for attribute in hf_param_name.split('''.''' ): __SCREAMING_SNAKE_CASE :Union[str, Any] = getattr(lowercase__ , lowercase__ ) __SCREAMING_SNAKE_CASE :int = value else: __SCREAMING_SNAKE_CASE :List[str] = value logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' ) def __lowerCamelCase ( a_ : Union[str, Any] , a_ : Optional[int] , a_ : Any , a_ : Any , a_ : List[Any] ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE :Dict = None for param_key in PARAM_MAPPING.keys(): if full_name.endswith(lowercase__ ): __SCREAMING_SNAKE_CASE :List[str] = PARAM_MAPPING[full_name.split('''.''' )[-1]] __SCREAMING_SNAKE_CASE :List[str] = 'param' if weight_type is not None and weight_type != "param": __SCREAMING_SNAKE_CASE :str = '.'.join([key, weight_type] ) elif weight_type is not None and weight_type == "param": __SCREAMING_SNAKE_CASE :List[str] = '.'.join([key, hf_param_name] ) else: __SCREAMING_SNAKE_CASE :int = key __SCREAMING_SNAKE_CASE :int = value if 'lm_head' in full_key else value[0] lowerCamelCase_ = { "W_a": "linear_1.weight", "W_b": "linear_2.weight", "b_a": "linear_1.bias", "b_b": "linear_2.bias", "ln_W": "norm.weight", "ln_b": "norm.bias", } def __lowerCamelCase ( a_ : Dict , a_ : str , a_ : Tuple=None , a_ : Union[str, Any]=None ) -> List[Any]: __SCREAMING_SNAKE_CASE :Optional[Any] = False for key, mapped_key in MAPPING.items(): __SCREAMING_SNAKE_CASE :List[str] = 'wav2vec2.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]: __SCREAMING_SNAKE_CASE :Dict = True if "*" in mapped_key: __SCREAMING_SNAKE_CASE :List[Any] = name.split(lowercase__ )[0].split('''.''' )[-2] __SCREAMING_SNAKE_CASE :List[str] = mapped_key.replace('''*''' , lowercase__ ) if "weight_g" in name: __SCREAMING_SNAKE_CASE :List[str] = 'weight_g' elif "weight_v" in name: __SCREAMING_SNAKE_CASE :Any = 'weight_v' elif "bias" in name: __SCREAMING_SNAKE_CASE :Dict = 'bias' elif "weight" in name: # TODO: don't match quantizer.weight_proj __SCREAMING_SNAKE_CASE :int = 'weight' else: __SCREAMING_SNAKE_CASE :Optional[int] = None if hf_dict is not None: rename_dict(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) else: set_recursively(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) return is_used return is_used def __lowerCamelCase ( a_ : str , a_ : Optional[int] , a_ : List[Any] ) -> int: __SCREAMING_SNAKE_CASE :int = [] __SCREAMING_SNAKE_CASE :List[str] = fairseq_model.state_dict() __SCREAMING_SNAKE_CASE :Optional[Any] = hf_model.wavaveca.feature_extractor for name, value in fairseq_dict.items(): __SCREAMING_SNAKE_CASE :str = False if "conv_layers" in name: load_conv_layer( lowercase__ , lowercase__ , lowercase__ , lowercase__ , hf_model.config.feat_extract_norm == '''group''' , ) __SCREAMING_SNAKE_CASE :Optional[Any] = True else: __SCREAMING_SNAKE_CASE :str = load_wavaveca_layer(lowercase__ , lowercase__ , lowercase__ ) if not is_used: unused_weights.append(lowercase__ ) logger.warning(f'''Unused weights: {unused_weights}''' ) def __lowerCamelCase ( a_ : Dict , a_ : Any , a_ : Optional[int] , a_ : Dict , a_ : int ) -> Tuple: __SCREAMING_SNAKE_CASE :List[str] = full_name.split('''conv_layers.''' )[-1] __SCREAMING_SNAKE_CASE :Tuple = name.split('''.''' ) __SCREAMING_SNAKE_CASE :List[str] = int(items[0] ) __SCREAMING_SNAKE_CASE :Optional[Any] = int(items[1] ) if type_id == 0: if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' ) __SCREAMING_SNAKE_CASE :str = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' ) __SCREAMING_SNAKE_CASE :Any = value logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' ) elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm): if "bias" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' ) __SCREAMING_SNAKE_CASE :Optional[Any] = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) elif "weight" in name: if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape: raise ValueError( f'''{full_name} has size {value.shape}, but''' f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' ) __SCREAMING_SNAKE_CASE :Optional[int] = value logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' ) else: unused_weights.append(lowercase__ ) @torch.no_grad() def __lowerCamelCase ( a_ : Tuple , a_ : Union[str, Any] , a_ : Optional[int]=None , a_ : Optional[Any]=None , a_ : Union[str, Any]=True , a_ : Dict=False ) -> Dict: if config_path is not None: __SCREAMING_SNAKE_CASE :Dict = WavaVecaConfig.from_pretrained(lowercase__ ) else: __SCREAMING_SNAKE_CASE :Dict = WavaVecaConfig() if is_seq_class: __SCREAMING_SNAKE_CASE :Union[str, Any] = read_txt_into_dict(lowercase__ ) __SCREAMING_SNAKE_CASE :str = idalabel __SCREAMING_SNAKE_CASE :str = WavaVecaForSequenceClassification(lowercase__ ) __SCREAMING_SNAKE_CASE :Optional[int] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowercase__ , return_attention_mask=lowercase__ , ) feature_extractor.save_pretrained(lowercase__ ) elif is_finetuned: if dict_path: __SCREAMING_SNAKE_CASE :Union[str, Any] = Dictionary.load(lowercase__ ) # important change bos & pad token id since CTC symbol is <pad> and # not <s> as in fairseq __SCREAMING_SNAKE_CASE :Any = target_dict.pad_index __SCREAMING_SNAKE_CASE :int = target_dict.bos_index __SCREAMING_SNAKE_CASE :int = target_dict.eos_index __SCREAMING_SNAKE_CASE :Union[str, Any] = len(target_dict.symbols ) __SCREAMING_SNAKE_CASE :Any = os.path.join(lowercase__ , '''vocab.json''' ) if not os.path.isdir(lowercase__ ): logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(lowercase__ ) ) return os.makedirs(lowercase__ , exist_ok=lowercase__ ) __SCREAMING_SNAKE_CASE :int = target_dict.indices # fairseq has the <pad> and <s> switched __SCREAMING_SNAKE_CASE :List[Any] = 0 __SCREAMING_SNAKE_CASE :int = 1 with open(lowercase__ , '''w''' , encoding='''utf-8''' ) as vocab_handle: json.dump(lowercase__ , lowercase__ ) __SCREAMING_SNAKE_CASE :str = WavaVecaCTCTokenizer( lowercase__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=lowercase__ , ) __SCREAMING_SNAKE_CASE :List[str] = True if config.feat_extract_norm == 'layer' else False __SCREAMING_SNAKE_CASE :Optional[int] = WavaVecaFeatureExtractor( feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=lowercase__ , return_attention_mask=lowercase__ , ) __SCREAMING_SNAKE_CASE :List[str] = WavaVecaProcessor(feature_extractor=lowercase__ , tokenizer=lowercase__ ) processor.save_pretrained(lowercase__ ) __SCREAMING_SNAKE_CASE :Optional[int] = WavaVecaForCTC(lowercase__ ) else: __SCREAMING_SNAKE_CASE :Optional[Any] = WavaVecaForPreTraining(lowercase__ ) if is_finetuned or is_seq_class: __SCREAMING_SNAKE_CASE :Any = fairseq.checkpoint_utils.load_model_ensemble_and_task( [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} ) else: __SCREAMING_SNAKE_CASE :Optional[int] = argparse.Namespace(task='''audio_pretraining''' ) __SCREAMING_SNAKE_CASE :Optional[int] = fairseq.tasks.setup_task(lowercase__ ) __SCREAMING_SNAKE_CASE :Optional[Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=lowercase__ ) __SCREAMING_SNAKE_CASE :str = model[0].eval() recursively_load_weights(lowercase__ , lowercase__ , not is_finetuned ) hf_wavavec.save_pretrained(lowercase__ ) if __name__ == "__main__": lowerCamelCase_ = argparse.ArgumentParser() parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint") parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model") parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert") parser.add_argument( "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not" ) parser.add_argument( "--is_seq_class", action="store_true", help="Whether the model to convert is a fine-tuned sequence classification model or not", ) lowerCamelCase_ = parser.parse_args() lowerCamelCase_ = not args.not_finetuned and not args.is_seq_class convert_wavaveca_checkpoint( args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, is_finetuned, args.is_seq_class, )
498
import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = FunnelTokenizer lowercase_ = FunnelTokenizerFast lowercase_ = True lowercase_ = True def __lowercase( self : Union[str, Any] )-> Tuple: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ : str = [ '<unk>', '<cls>', '<sep>', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def __lowercase( self : Any , **a_ : Any )-> List[str]: """simple docstring""" return FunnelTokenizer.from_pretrained(self.tmpdirname , **a_ ) def __lowercase( self : Tuple , **a_ : List[Any] )-> List[Any]: """simple docstring""" return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **a_ ) def __lowercase( self : Optional[Any] , a_ : List[str] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'UNwant\u00E9d,running' SCREAMING_SNAKE_CASE__ : int = 'unwanted, running' return input_text, output_text def __lowercase( self : Optional[Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer_class(self.vocab_file ) SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(a_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [7, 4, 5, 10, 8, 9] ) def __lowercase( self : List[Any] )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_tokenizers(do_lower_case=a_ ) for tokenizer in tokenizers: SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer('UNwant\u00E9d,running' ) SCREAMING_SNAKE_CASE__ : List[Any] = len(inputs['input_ids'] ) - 1 self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer('UNwant\u00E9d,running' , 'UNwant\u00E9d,running' ) self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len + [1] * sentence_len )
85
0
'''simple docstring''' import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) A__ : Optional[int] = logging.getLogger(__name__) A__ : List[Any] = "Hello world! cécé herlolip" A__ : Dict = namedtuple( '''BertAbsConfig''', [ '''temp_dir''', '''large''', '''use_bert_emb''', '''finetune_bert''', '''encoder''', '''share_emb''', '''max_pos''', '''enc_layers''', '''enc_hidden_size''', '''enc_heads''', '''enc_ff_size''', '''enc_dropout''', '''dec_layers''', '''dec_hidden_size''', '''dec_heads''', '''dec_ff_size''', '''dec_dropout''', ], ) def a_ ( _UpperCAmelCase : List[str] ,_UpperCAmelCase : List[Any] ) -> Any: __snake_case : Optional[Any] = BertAbsConfig( temp_dir='.' ,finetune_bert=lowercase__ ,large=lowercase__ ,share_emb=lowercase__ ,use_bert_emb=lowercase__ ,encoder='bert' ,max_pos=5_12 ,enc_layers=6 ,enc_hidden_size=5_12 ,enc_heads=8 ,enc_ff_size=5_12 ,enc_dropout=0.2 ,dec_layers=6 ,dec_hidden_size=7_68 ,dec_heads=8 ,dec_ff_size=20_48 ,dec_dropout=0.2 ,) __snake_case : Union[str, Any] = torch.load(lowercase__ ,lambda _UpperCAmelCase ,_UpperCAmelCase : storage ) __snake_case : Any = AbsSummarizer(lowercase__ ,torch.device('cpu' ) ,lowercase__ ) original.eval() __snake_case : List[Any] = BertAbsSummarizer(lowercase__ ,torch.device('cpu' ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info('convert the model' ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info('Make sure that the models\' outputs are identical' ) __snake_case : Any = BertTokenizer.from_pretrained('bert-base-uncased' ) # prepare the model inputs __snake_case : Optional[Any] = tokenizer.encode('This is sample éàalj\'-.' ) encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowercase__ )) ) __snake_case : Optional[int] = torch.tensor(lowercase__ ).unsqueeze(0 ) __snake_case : List[str] = tokenizer.encode('This is sample 3 éàalj\'-.' ) decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowercase__ )) ) __snake_case : List[str] = torch.tensor(lowercase__ ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass __snake_case : int = encoder_input_ids __snake_case : Any = decoder_input_ids __snake_case : Union[str, Any] = None __snake_case : Dict = None __snake_case : str = None __snake_case : List[str] = None __snake_case : Optional[Any] = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. # We make sure that the outputs of the full stack are identical __snake_case : Optional[Any] = original(lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )[0] __snake_case : Optional[int] = original.generator(lowercase__ ) __snake_case : Tuple = new_model( lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ ,lowercase__ )[0] __snake_case : List[Any] = new_model.generator(lowercase__ ) __snake_case : Tuple = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print('Maximum absolute difference beween weights: {:.2f}'.format(lowercase__ ) ) __snake_case : Optional[int] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print('Maximum absolute difference beween weights: {:.2f}'.format(lowercase__ ) ) __snake_case : List[Any] = torch.allclose(lowercase__ ,lowercase__ ,atol=1E-3 ) if are_identical: logging.info('all weights are equal up to 1e-3' ) else: raise ValueError('the weights are different. The new model is likely different from the original one.' ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info('saving the model\'s state dictionary' ) torch.save( new_model.state_dict() ,'./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' ) if __name__ == "__main__": A__ : Tuple = argparse.ArgumentParser() parser.add_argument( '''--bertabs_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''', ) A__ : Tuple = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
286
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Any = { "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json", # See all LeViT models at https://huggingface.co/models?filter=levit } class snake_case ( UpperCamelCase_ ): lowercase_ = 'levit' def __init__( self : str , a_ : Optional[Any]=224 , a_ : List[str]=3 , a_ : Any=3 , a_ : Any=2 , a_ : Tuple=1 , a_ : int=16 , a_ : Optional[int]=[128, 256, 384] , a_ : Dict=[4, 8, 12] , a_ : List[str]=[4, 4, 4] , a_ : Any=[16, 16, 16] , a_ : Dict=0 , a_ : Tuple=[2, 2, 2] , a_ : Union[str, Any]=[2, 2, 2] , a_ : Optional[Any]=0.02 , **a_ : str , )-> Any: """simple docstring""" super().__init__(**a_ ) SCREAMING_SNAKE_CASE__ : Any = image_size SCREAMING_SNAKE_CASE__ : List[Any] = num_channels SCREAMING_SNAKE_CASE__ : Any = kernel_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = stride SCREAMING_SNAKE_CASE__ : Any = padding SCREAMING_SNAKE_CASE__ : Any = hidden_sizes SCREAMING_SNAKE_CASE__ : List[Any] = num_attention_heads SCREAMING_SNAKE_CASE__ : Optional[Any] = depths SCREAMING_SNAKE_CASE__ : List[str] = key_dim SCREAMING_SNAKE_CASE__ : int = drop_path_rate SCREAMING_SNAKE_CASE__ : List[str] = patch_size SCREAMING_SNAKE_CASE__ : List[str] = attention_ratio SCREAMING_SNAKE_CASE__ : Tuple = mlp_ratio SCREAMING_SNAKE_CASE__ : str = initializer_range SCREAMING_SNAKE_CASE__ : List[Any] = [ ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class snake_case ( UpperCamelCase_ ): lowercase_ = version.parse('1.11' ) @property def __lowercase( self : str )-> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def __lowercase( self : Any )-> float: """simple docstring""" return 1e-4
85
0
'''simple docstring''' import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging __A : Union[str, Any] = logging.get_logger(__name__) __A : str = { "google/pix2struct-textcaps-base": ( "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json" ), } class __snake_case ( UpperCamelCase_): """simple docstring""" lowercase = 'pix2struct_text_model' lowercase = ['past_key_values'] lowercase = { 'hidden_size': 'hidden_size', 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : Dict , lowerCamelCase : Any=5_02_44 , lowerCamelCase : List[str]=7_68 , lowerCamelCase : Any=64 , lowerCamelCase : Optional[int]=20_48 , lowerCamelCase : int=12 , lowerCamelCase : Dict=12 , lowerCamelCase : Dict=32 , lowerCamelCase : Optional[Any]=1_28 , lowerCamelCase : Union[str, Any]=0.1 , lowerCamelCase : str=1E-6 , lowerCamelCase : int=1.0 , lowerCamelCase : Any="gelu_new" , lowerCamelCase : Dict=0 , lowerCamelCase : Union[str, Any]=False , lowerCamelCase : Tuple=0 , lowerCamelCase : Tuple=1 , lowerCamelCase : int=False , lowerCamelCase : Union[str, Any]=True , **lowerCamelCase : Optional[Any] , ) -> Optional[int]: lowerCAmelCase_ : List[Any] = vocab_size lowerCAmelCase_ : int = hidden_size lowerCAmelCase_ : List[Any] = d_kv lowerCAmelCase_ : List[Any] = d_ff lowerCAmelCase_ : List[str] = num_layers lowerCAmelCase_ : List[str] = num_heads lowerCAmelCase_ : Optional[int] = relative_attention_num_buckets lowerCAmelCase_ : Any = relative_attention_max_distance lowerCAmelCase_ : Optional[int] = dropout_rate lowerCAmelCase_ : List[str] = layer_norm_epsilon lowerCAmelCase_ : str = initializer_factor lowerCAmelCase_ : List[str] = use_cache lowerCAmelCase_ : Any = eos_token_id lowerCAmelCase_ : Optional[int] = decoder_start_token_id # for backwards compatibility lowerCAmelCase_ : str = dense_act_fn super().__init__( pad_token_id=a_ , eos_token_id=a_ , decoder_start_token_id=a_ , tie_word_embeddings=a_ , is_decoder=a_ , **a_ , ) @classmethod def __lowercase ( cls : Tuple , lowerCamelCase : Union[str, os.PathLike] , **lowerCamelCase : int ) -> "PretrainedConfig": cls._set_token_in_kwargs(a_ ) lowerCAmelCase_ : Optional[int] = cls.get_config_dict(a_ , **a_ ) # get the text config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": lowerCAmelCase_ : str = config_dict['text_config'] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(a_ , **a_ ) class __snake_case ( UpperCamelCase_): """simple docstring""" lowercase = 'pix2struct_vision_model' def __init__( self : Optional[Any] , lowerCamelCase : str=7_68 , lowerCamelCase : str=7_68 , lowerCamelCase : str=20_48 , lowerCamelCase : Optional[Any]=64 , lowerCamelCase : Union[str, Any]=12 , lowerCamelCase : Any=12 , lowerCamelCase : str="gelu_new" , lowerCamelCase : Optional[int]=1E-6 , lowerCamelCase : Union[str, Any]=0.0 , lowerCamelCase : Dict=0.0 , lowerCamelCase : Any=1E-10 , lowerCamelCase : Union[str, Any]=1.0 , lowerCamelCase : List[Any]=40_96 , lowerCamelCase : Union[str, Any]=32 , lowerCamelCase : Tuple=1_28 , **lowerCamelCase : Optional[Any] , ) -> Optional[int]: super().__init__(**a_ ) lowerCAmelCase_ : Tuple = hidden_size lowerCAmelCase_ : Dict = patch_embed_hidden_size lowerCAmelCase_ : int = d_ff lowerCAmelCase_ : Tuple = dropout_rate lowerCAmelCase_ : str = num_hidden_layers lowerCAmelCase_ : Optional[Any] = num_attention_heads lowerCAmelCase_ : Tuple = initializer_range lowerCAmelCase_ : Optional[int] = initializer_factor lowerCAmelCase_ : Union[str, Any] = attention_dropout lowerCAmelCase_ : str = layer_norm_eps lowerCAmelCase_ : Union[str, Any] = dense_act_fn lowerCAmelCase_ : Dict = seq_len lowerCAmelCase_ : Optional[Any] = relative_attention_num_buckets lowerCAmelCase_ : Any = relative_attention_max_distance lowerCAmelCase_ : Dict = d_kv @classmethod def __lowercase ( cls : Tuple , lowerCamelCase : Union[str, os.PathLike] , **lowerCamelCase : Dict ) -> "PretrainedConfig": cls._set_token_in_kwargs(a_ ) lowerCAmelCase_ : Tuple = cls.get_config_dict(a_ , **a_ ) # get the vision config dict if we are loading from Pix2StructConfig if config_dict.get("""model_type""" ) == "pix2struct": lowerCAmelCase_ : List[Any] = config_dict['vision_config'] if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type: logger.warning( F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type ' F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' ) return cls.from_dict(a_ , **a_ ) class __snake_case ( UpperCamelCase_): """simple docstring""" lowercase = 'pix2struct' lowercase = True def __init__( self : str , lowerCamelCase : List[Any]=None , lowerCamelCase : Dict=None , lowerCamelCase : int=1.0 , lowerCamelCase : Optional[int]=0.02 , lowerCamelCase : Any=False , lowerCamelCase : Tuple=False , lowerCamelCase : List[Any]=True , **lowerCamelCase : List[str] , ) -> int: super().__init__(tie_word_embeddings=a_ , is_encoder_decoder=a_ , **a_ ) if text_config is None: lowerCAmelCase_ : Tuple = {} logger.info("""text_config is None. Initializing the Pix2StructTextConfig with default values.""" ) if vision_config is None: lowerCAmelCase_ : int = {} logger.info("""vision_config is None. Initializing the Pix2StructVisionConfig with default values.""" ) lowerCAmelCase_ : int = PixaStructTextConfig(**a_ ) lowerCAmelCase_ : str = PixaStructVisionConfig(**a_ ) lowerCAmelCase_ : Dict = self.text_config.decoder_start_token_id lowerCAmelCase_ : List[Any] = self.text_config.pad_token_id lowerCAmelCase_ : int = self.text_config.eos_token_id lowerCAmelCase_ : Dict = initializer_factor lowerCAmelCase_ : str = initializer_range lowerCAmelCase_ : Union[str, Any] = self.initializer_range lowerCAmelCase_ : str = self.initializer_range lowerCAmelCase_ : List[Any] = is_vqa @classmethod def __lowercase ( cls : Any , lowerCamelCase : PixaStructTextConfig , lowerCamelCase : PixaStructVisionConfig , **lowerCamelCase : Any ) -> Tuple: return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **a_ ) def __lowercase ( self : Optional[Any] ) -> str: lowerCAmelCase_ : Tuple = copy.deepcopy(self.__dict__ ) lowerCAmelCase_ : Any = self.text_config.to_dict() lowerCAmelCase_ : List[Any] = self.vision_config.to_dict() lowerCAmelCase_ : Optional[int] = self.__class__.model_type return output
275
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class snake_case ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase_ = StableDiffusionInstructPixaPixPipeline lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'} lowercase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS def __lowercase( self : str )-> int: """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) SCREAMING_SNAKE_CASE__ : List[str] = PNDMScheduler(skip_prk_steps=a_ ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) SCREAMING_SNAKE_CASE__ : int = CLIPTextModel(a_ ) SCREAMING_SNAKE_CASE__ : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) SCREAMING_SNAKE_CASE__ : List[str] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def __lowercase( self : List[Any] , a_ : Tuple , a_ : Optional[Any]=0 )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ ) SCREAMING_SNAKE_CASE__ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE__ : List[Any] = Image.fromarray(np.uinta(a_ ) ).convert('RGB' ) if str(a_ ).startswith('mps' ): SCREAMING_SNAKE_CASE__ : str = torch.manual_seed(a_ ) else: SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Generator(device=a_ ).manual_seed(a_ ) SCREAMING_SNAKE_CASE__ : Dict = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'image_guidance_scale': 1, 'output_type': 'numpy', } return inputs def __lowercase( self : str )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Tuple = self.get_dummy_inputs(a_ ) SCREAMING_SNAKE_CASE__ : int = sd_pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE__ : Dict = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase( self : Optional[Any] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE__ : Dict = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_inputs(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = 'french fries' SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe(**a_ , negative_prompt=a_ ) SCREAMING_SNAKE_CASE__ : Dict = output.images SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase( self : List[Any] )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : int = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [inputs['prompt']] * 2 SCREAMING_SNAKE_CASE__ : List[str] = np.array(inputs['image'] ).astype(np.floataa ) / 255.0 SCREAMING_SNAKE_CASE__ : Tuple = torch.from_numpy(a_ ).unsqueeze(0 ).to(a_ ) SCREAMING_SNAKE_CASE__ : Dict = image / 2 + 0.5 SCREAMING_SNAKE_CASE__ : Tuple = image.permute(0 , 3 , 1 , 2 ) SCREAMING_SNAKE_CASE__ : int = image.repeat(2 , 1 , 1 , 1 ) SCREAMING_SNAKE_CASE__ : Optional[int] = sd_pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : Any = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) SCREAMING_SNAKE_CASE__ : int = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase( self : List[Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE__ : str = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Optional[Any] = EulerAncestralDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' ) SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : Dict = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_inputs(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = sd_pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : Any = [round(a_ , 4 ) for x in image_slice.flatten().tolist()] print(','.join([str(a_ ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE__ : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase( self : Union[str, Any] )-> Any: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def __lowercase( self : List[Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : int = VaeImageProcessor(do_resize=a_ , do_normalize=a_ ) SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Any = pipe(**self.get_dummy_inputs_by_type(a_ , input_image_type='pt' ) )[0] SCREAMING_SNAKE_CASE__ : Optional[int] = components['vae'] SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs_by_type(a_ , input_image_type='pt' ) for image_param in self.image_latents_params: if image_param in inputs.keys(): SCREAMING_SNAKE_CASE__ : Union[str, Any] = vae.encode(inputs[image_param] ).latent_dist.mode() SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(**a_ )[0] SCREAMING_SNAKE_CASE__ : List[Any] = np.abs(out - out_latents_inputs ).max() self.assertLess(a_ , 1e-4 , 'passing latents as image input generate different result from passing image' ) @slow @require_torch_gpu class snake_case ( unittest.TestCase ): def __lowercase( self : Tuple )-> Dict: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase( self : List[Any] , a_ : Dict=0 )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = torch.manual_seed(a_ ) SCREAMING_SNAKE_CASE__ : List[str] = load_image( 'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' ) SCREAMING_SNAKE_CASE__ : Tuple = { 'prompt': 'turn him into a cyborg', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'image_guidance_scale': 1.0, 'output_type': 'numpy', } return inputs def __lowercase( self : int )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : str = self.get_inputs() SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __lowercase( self : Dict )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ ) SCREAMING_SNAKE_CASE__ : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs() SCREAMING_SNAKE_CASE__ : Dict = pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : Optional[int] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ : List[Any] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __lowercase( self : Optional[int] )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ ) SCREAMING_SNAKE_CASE__ : Dict = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : str = self.get_inputs() SCREAMING_SNAKE_CASE__ : Tuple = pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __lowercase( self : int )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = 0 def callback_fn(a_ : int , a_ : int , a_ : torch.FloatTensor ) -> None: SCREAMING_SNAKE_CASE__ : Tuple = True nonlocal number_of_steps number_of_steps += 1 if step == 1: SCREAMING_SNAKE_CASE__ : Union[str, Any] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) SCREAMING_SNAKE_CASE__ : List[Any] = latents[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: SCREAMING_SNAKE_CASE__ : Optional[int] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) SCREAMING_SNAKE_CASE__ : Tuple = latents[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : Dict = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 SCREAMING_SNAKE_CASE__ : List[str] = False SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs() pipe(**a_ , callback=a_ , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def __lowercase( self : int )-> Any: """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs() SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(**a_ ) SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def __lowercase( self : Tuple )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 SCREAMING_SNAKE_CASE__ : Dict = inputs['image'].resize((504, 504) ) SCREAMING_SNAKE_CASE__ : List[Any] = 'timbrooks/instruct-pix2pix' SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained( a_ , safety_checker=a_ , ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : Any = pipe(**a_ ) SCREAMING_SNAKE_CASE__ : List[str] = output.images[0] SCREAMING_SNAKE_CASE__ : Any = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) SCREAMING_SNAKE_CASE__ : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
85
0
from __future__ import annotations from typing import Any class __snake_case : def __init__( self ,snake_case ,snake_case ,snake_case = 0 ): '''simple docstring''' lowercase : str = row, column lowercase : List[str] = [[default_value for c in range(a_ )] for r in range(a_ )] def __str__( self ): '''simple docstring''' lowercase : List[Any] = f"Matrix consist of {self.row} rows and {self.column} columns\n" # Make string identifier lowercase : Dict = 0 for row_vector in self.array: for obj in row_vector: lowercase : int = max(a_ ,len(str(a_ ) ) ) lowercase : Optional[int] = f"%{max_element_length}s" # Make string and return def single_line(snake_case ) -> str: nonlocal string_format_identifier lowercase : Optional[int] = '[' line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(a_ ) for row_vector in self.array ) return s def __repr__( self ): '''simple docstring''' return str(self ) def _SCREAMING_SNAKE_CASE ( self ,snake_case ): '''simple docstring''' if not (isinstance(a_ ,(list, tuple) ) and len(a_ ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self ,snake_case ): '''simple docstring''' assert self.validate_indicies(a_ ) return self.array[loc[0]][loc[1]] def __setitem__( self ,snake_case ,snake_case ): '''simple docstring''' assert self.validate_indicies(a_ ) lowercase : Tuple = value def __add__( self ,snake_case ): '''simple docstring''' assert isinstance(a_ ,a_ ) assert self.row == another.row and self.column == another.column # Add lowercase : List[str] = Matrix(self.row ,self.column ) for r in range(self.row ): for c in range(self.column ): lowercase : List[Any] = self[r, c] + another[r, c] return result def __neg__( self ): '''simple docstring''' lowercase : Dict = Matrix(self.row ,self.column ) for r in range(self.row ): for c in range(self.column ): lowercase : Optional[Any] = -self[r, c] return result def __sub__( self ,snake_case ): '''simple docstring''' return self + (-another) def __mul__( self ,snake_case ): '''simple docstring''' if isinstance(a_ ,(int, float) ): # Scalar multiplication lowercase : List[str] = Matrix(self.row ,self.column ) for r in range(self.row ): for c in range(self.column ): lowercase : List[str] = self[r, c] * another return result elif isinstance(a_ ,a_ ): # Matrix multiplication assert self.column == another.row lowercase : Tuple = Matrix(self.row ,another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: lowercase : Optional[Any] = f"Unsupported type given for another ({type(a_ )})" raise TypeError(a_ ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Union[str, Any] = Matrix(self.column ,self.row ) for r in range(self.row ): for c in range(self.column ): lowercase : int = self[r, c] return result def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ): '''simple docstring''' assert isinstance(a_ ,a_ ) and isinstance(a_ ,a_ ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate lowercase : str = v.transpose() lowercase : Tuple = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def _snake_case( ) -> Any: lowercase : List[str] = Matrix(3 , 3 , 0 ) for i in range(3 ): lowercase : Any = 1 print(f"a^(-1) is {ainv}" ) # u, v lowercase : Optional[Any] = Matrix(3 , 1 , 0 ) lowercase : Union[str, Any] = 1, 2, -3 lowercase : Union[str, Any] = Matrix(3 , 1 , 0 ) lowercase : List[Any] = 4, -2, 5 print(f"u is {u}" ) print(f"v is {v}" ) print(f"uv^T is {u * v.transpose()}" ) # Sherman Morrison print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(lowercase__ , lowercase__ )}" ) def _snake_case( ) -> str: import doctest doctest.testmod() testa()
336
import math from collections.abc import Callable def _a ( lowercase__ : Callable[[float], float] , lowercase__ : float , lowercase__ : float ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : float = xa SCREAMING_SNAKE_CASE__ : float = xa while True: if x_n == x_na or function(lowercase__ ) == function(lowercase__ ): raise ZeroDivisionError('float division by zero, could not find root' ) SCREAMING_SNAKE_CASE__ : float = x_na - ( function(lowercase__ ) / ((function(lowercase__ ) - function(lowercase__ )) / (x_na - x_n)) ) if abs(x_na - x_na ) < 10**-5: return x_na SCREAMING_SNAKE_CASE__ : Dict = x_na SCREAMING_SNAKE_CASE__ : List[str] = x_na def _a ( lowercase__ : float ): '''simple docstring''' return math.pow(lowercase__ , 3 ) - (2 * x) - 5 if __name__ == "__main__": print(intersection(f, 3, 3.5))
85
0
"""simple docstring""" from statistics import mean import numpy as np def lowercase (_snake_case ,_snake_case ,_snake_case ,_snake_case ) -> Any: '''simple docstring''' __UpperCamelCase = 0 # Number of processes finished __UpperCamelCase = 0 # Displays the finished process. # If it is 0, the performance is completed if it is 1, before the performance. __UpperCamelCase = [0] * no_of_process # List to include calculation results __UpperCamelCase = [0] * no_of_process # Sort by arrival time. __UpperCamelCase = [burst_time[i] for i in np.argsort(lowercase__ )] __UpperCamelCase = [process_name[i] for i in np.argsort(lowercase__ )] arrival_time.sort() while no_of_process > finished_process_count: __UpperCamelCase = 0 while finished_process[i] == 1: i += 1 if current_time < arrival_time[i]: __UpperCamelCase = arrival_time[i] __UpperCamelCase = 0 # Index showing the location of the process being performed __UpperCamelCase = 0 # Saves the current response ratio. __UpperCamelCase = 0 for i in range(0 ,lowercase__ ): if finished_process[i] == 0 and arrival_time[i] <= current_time: __UpperCamelCase = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[ i ] if response_ratio < temp: __UpperCamelCase = temp __UpperCamelCase = i # Calculate the turn around time __UpperCamelCase = current_time + burst_time[loc] - arrival_time[loc] current_time += burst_time[loc] # Indicates that the process has been performed. __UpperCamelCase = 1 # Increase finished_process_count by 1 finished_process_count += 1 return turn_around_time def lowercase (_snake_case ,_snake_case ,_snake_case ,_snake_case ) -> int: '''simple docstring''' __UpperCamelCase = [0] * no_of_process for i in range(0 ,lowercase__ ): __UpperCamelCase = turn_around_time[i] - burst_time[i] return waiting_time if __name__ == "__main__": _A = 5 _A = ["A", "B", "C", "D", "E"] _A = [1, 2, 3, 4, 5] _A = [1, 2, 3, 4, 5] _A = calculate_turn_around_time( process_name, arrival_time, burst_time, no_of_process ) _A = calculate_waiting_time( process_name, turn_around_time, burst_time, no_of_process ) print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time") for i in range(0, no_of_process): print( f"""{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t""" f"""{turn_around_time[i]}\t\t\t{waiting_time[i]}""" ) print(f"""average waiting time : {mean(waiting_time):.5f}""") print(f"""average turn around time : {mean(turn_around_time):.5f}""")
505
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class snake_case ( UpperCamelCase_ ): lowercase_ = ['image_processor', 'tokenizer'] lowercase_ = 'AutoImageProcessor' lowercase_ = 'AutoTokenizer' def __init__( self : List[Any] , a_ : int , a_ : Union[str, Any] )-> List[Any]: """simple docstring""" super().__init__(a_ , a_ ) SCREAMING_SNAKE_CASE__ : str = self.image_processor def __call__( self : Tuple , a_ : str=None , a_ : List[Any]=None , a_ : Optional[Any]=None , **a_ : Dict )-> Tuple: """simple docstring""" if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: SCREAMING_SNAKE_CASE__ : Any = self.tokenizer(a_ , return_tensors=a_ , **a_ ) if images is not None: SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processor(a_ , return_tensors=a_ , **a_ ) if text is not None and images is not None: SCREAMING_SNAKE_CASE__ : List[str] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ ) def __lowercase( self : Dict , *a_ : Any , **a_ : Any )-> List[Any]: """simple docstring""" return self.tokenizer.batch_decode(*a_ , **a_ ) def __lowercase( self : Dict , *a_ : Union[str, Any] , **a_ : Optional[int] )-> Dict: """simple docstring""" return self.tokenizer.decode(*a_ , **a_ ) @property def __lowercase( self : Any )-> Any: """simple docstring""" return ["input_ids", "attention_mask", "pixel_values"]
85
0
import argparse import logging import os import datasets import tensorflow as tf from transformers import AutoTokenizer _lowercase : Any =logging.getLogger(__name__) def _SCREAMING_SNAKE_CASE ( ): lowerCamelCase_ : Optional[int] = argparse.ArgumentParser( description='Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.' ) parser.add_argument( '--dataset_name' ,type=lowercase__ ,default='wikitext' ,help='Name of the training. Explore datasets at: hf.co/datasets.' ,) parser.add_argument( '--dataset_config' ,type=lowercase__ ,default='wikitext-103-raw-v1' ,help='Configuration name of the dataset.' ) parser.add_argument( '--tokenizer_name_or_path' ,type=lowercase__ ,default='sayakpaul/unigram-tokenizer-wikitext' ,help='Tokenizer identifier. Can be a local filepath or a Hub identifier.' ,) parser.add_argument( '--shard_size' ,type=lowercase__ ,default=10_00 ,help='Number of entries to go in a single shard.' ,) parser.add_argument('--split' ,type=lowercase__ ,default='train' ,choices=['train', 'test', 'validation'] ) parser.add_argument( '--limit' ,default=lowercase__ ,type=lowercase__ ,help='Limit the number of shards (used for debugging).' ,) parser.add_argument( '--max_length' ,type=lowercase__ ,default=5_12 ,help='Maximum sequence length. For training on TPUs, it helps to have a maximum' ' sequence length that is a multiple of 8.' ,) parser.add_argument( '--output_dir' ,default='tf-tpu' ,type=lowercase__ ,help='Output directory where the TFRecord shards will be saved. If the' ' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord' ' shards will be directly saved to a Google Cloud Storage bucket.' ,) lowerCamelCase_ : Dict = parser.parse_args() return args def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ): def fn(lowerCAmelCase__ ): return tokenizer(examples['text'] ) return fn def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ): lowerCamelCase_ : Optional[Any] = [] for i in range(len(tokenized_data['input_ids'] ) ): lowerCamelCase_ : List[Any] = { 'input_ids': tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data['input_ids'][i] ) ), 'attention_mask': tf.train.Feature( intaa_list=tf.train.IntaaList(value=tokenized_data['attention_mask'][i] ) ), } lowerCamelCase_ : str = tf.train.Features(feature=lowercase__ ) lowerCamelCase_ : Union[str, Any] = tf.train.Example(features=lowercase__ ) lowerCamelCase_ : Dict = example.SerializeToString() records.append(lowercase__ ) return records def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ): lowerCamelCase_ : Optional[Any] = datasets.load_dataset(args.dataset_name ,args.dataset_config ,split=args.split ) if args.limit is not None: lowerCamelCase_ : str = min(len(lowercase__ ) ,args.limit ) lowerCamelCase_ : str = dataset.select(range(lowercase__ ) ) print(F"Limiting the dataset to {args.limit} entries." ) lowerCamelCase_ : List[str] = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path ) # Handle output directory creation. # For serializing into a Google Cloud Storage Bucket, one needs to first # create a bucket. if "gs" not in args.output_dir: if not os.path.exists(args.output_dir ): os.makedirs(args.output_dir ) lowerCamelCase_ : Dict = os.path.join(args.output_dir ,args.split ) if not os.path.exists(lowercase__ ): os.makedirs(lowercase__ ) else: lowerCamelCase_ : List[str] = os.path.join(args.output_dir ,args.split ) # Tokenize the whole dataset at once. lowerCamelCase_ : Tuple = tokenize_function(lowercase__ ) lowerCamelCase_ : Tuple = dataset.map(lowercase__ ,batched=lowercase__ ,num_proc=4 ,remove_columns=['text'] ) # We need to concatenate all our texts together, and then split the result # into chunks of a fixed size, which we will call block_size. To do this, we # will use the map method again, with the option batched=True. When we use batched=True, # the function we pass to map() will be passed multiple inputs at once, allowing us # to group them into more or fewer examples than we had in the input. # This allows us to create our new fixed-length samples. The advantage of this # method is that we don't lose a whole lot of content from the dataset compared to the # case where we simply tokenize with a pre-defined max_length. def group_texts(lowerCAmelCase__ ): # Concatenate all texts. lowerCamelCase_ : List[str] = {k: sum(examples[k] ,[] ) for k in examples.keys()} lowerCamelCase_ : List[Any] = len(concatenated_examples[list(examples.keys() )[0]] ) # We drop the small remainder, though you could add padding instead if the model supports it # In this, as in all things, we advise you to follow your heart 🫀 lowerCamelCase_ : str = (total_length // args.max_length) * args.max_length # Split by chunks of max_len. lowerCamelCase_ : Optional[Any] = { k: [t[i : i + args.max_length] for i in range(0 ,lowercase__ ,args.max_length )] for k, t in concatenated_examples.items() } return result lowerCamelCase_ : Dict = dataset_tokenized.map(lowercase__ ,batched=lowercase__ ,batch_size=10_00 ,num_proc=4 ) lowerCamelCase_ : Tuple = 0 lowerCamelCase_ : Optional[int] = 0 for shard in range(0 ,len(lowercase__ ) ,args.shard_size ): lowerCamelCase_ : str = grouped_dataset[shard : shard + args.shard_size] lowerCamelCase_ : List[str] = len(dataset_snapshot['input_ids'] ) lowerCamelCase_ : List[Any] = os.path.join(lowercase__ ,F"dataset-{shard_count}-{records_containing}.tfrecord" ) lowerCamelCase_ : List[Any] = get_serialized_examples(lowercase__ ) with tf.io.TFRecordWriter(lowercase__ ) as out_file: for i in range(len(lowercase__ ) ): lowerCamelCase_ : int = serialized_examples[i] out_file.write(lowercase__ ) print('Wrote file {} containing {} records'.format(lowercase__ ,lowercase__ ) ) shard_count += 1 total_records += records_containing with open(F"split-{args.split}-records-count.txt" ,'w' ) as f: print(F"Total {args.split} records: {total_records}" ,file=lowercase__ ) if __name__ == "__main__": _lowercase : str =parse_args() main(args)
364
import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def _a ( lowercase__ : int = 3 ): '''simple docstring''' if isinstance(lowercase__ , lowercase__ ): raise TypeError('number of qubits must be a integer.' ) if number_of_qubits <= 0: raise ValueError('number of qubits must be > 0.' ) if math.floor(lowercase__ ) != number_of_qubits: raise ValueError('number of qubits must be exact integer.' ) if number_of_qubits > 10: raise ValueError('number of qubits too large to simulate(>10).' ) SCREAMING_SNAKE_CASE__ : Tuple = QuantumRegister(lowercase__ , 'qr' ) SCREAMING_SNAKE_CASE__ : int = ClassicalRegister(lowercase__ , 'cr' ) SCREAMING_SNAKE_CASE__ : Tuple = QuantumCircuit(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE__ : Tuple = number_of_qubits for i in range(lowercase__ ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(lowercase__ ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , lowercase__ , lowercase__ ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(lowercase__ , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(lowercase__ , lowercase__ ) # simulate with 10000 shots SCREAMING_SNAKE_CASE__ : Optional[int] = Aer.get_backend('qasm_simulator' ) SCREAMING_SNAKE_CASE__ : Tuple = execute(lowercase__ , lowercase__ , shots=1_00_00 ) return job.result().get_counts(lowercase__ ) if __name__ == "__main__": print( F"""Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}""" )
85
0
'''simple docstring''' import argparse import json from pathlib import Path import requests import timm import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import DeiTConfig, DeiTForImageClassificationWithTeacher, DeiTImageProcessor from transformers.utils import logging logging.set_verbosity_info() lowercase : Union[str, Any] = logging.get_logger(__name__) def lowerCamelCase__ ( __lowercase , __lowercase=False ): snake_case : int = [] for i in range(config.num_hidden_layers ): # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''deit.encoder.layer.{i}.layernorm_before.weight''') ) rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''deit.encoder.layer.{i}.layernorm_before.bias''') ) rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''deit.encoder.layer.{i}.attention.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''deit.encoder.layer.{i}.attention.output.dense.bias''') ) rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''deit.encoder.layer.{i}.layernorm_after.weight''') ) rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''deit.encoder.layer.{i}.layernorm_after.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''deit.encoder.layer.{i}.intermediate.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''deit.encoder.layer.{i}.intermediate.dense.bias''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''deit.encoder.layer.{i}.output.dense.weight''') ) rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''deit.encoder.layer.{i}.output.dense.bias''') ) # projection layer + position embeddings rename_keys.extend( [ ("""cls_token""", """deit.embeddings.cls_token"""), ("""dist_token""", """deit.embeddings.distillation_token"""), ("""patch_embed.proj.weight""", """deit.embeddings.patch_embeddings.projection.weight"""), ("""patch_embed.proj.bias""", """deit.embeddings.patch_embeddings.projection.bias"""), ("""pos_embed""", """deit.embeddings.position_embeddings"""), ] ) if base_model: # layernorm + pooler rename_keys.extend( [ ("""norm.weight""", """layernorm.weight"""), ("""norm.bias""", """layernorm.bias"""), ("""pre_logits.fc.weight""", """pooler.dense.weight"""), ("""pre_logits.fc.bias""", """pooler.dense.bias"""), ] ) # if just the base model, we should remove "deit" from all keys that start with "deit" snake_case : Tuple = [(pair[0], pair[1][4:]) if pair[1].startswith("""deit""" ) else pair for pair in rename_keys] else: # layernorm + classification heads rename_keys.extend( [ ("""norm.weight""", """deit.layernorm.weight"""), ("""norm.bias""", """deit.layernorm.bias"""), ("""head.weight""", """cls_classifier.weight"""), ("""head.bias""", """cls_classifier.bias"""), ("""head_dist.weight""", """distillation_classifier.weight"""), ("""head_dist.bias""", """distillation_classifier.bias"""), ] ) return rename_keys def lowerCamelCase__ ( __lowercase , __lowercase , __lowercase=False ): for i in range(config.num_hidden_layers ): if base_model: snake_case : Optional[Any] = '' else: snake_case : Optional[Any] = 'deit.' # read in weights + bias of input projection layer (in timm, this is a single matrix + bias) snake_case : Any = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' ) snake_case : Union[str, Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' ) # next, add query, keys and values (in that order) to the state dict snake_case : Optional[int] = in_proj_weight[ : config.hidden_size, : ] snake_case : Union[str, Any] = in_proj_bias[: config.hidden_size] snake_case : str = in_proj_weight[ config.hidden_size : config.hidden_size * 2, : ] snake_case : int = in_proj_bias[ config.hidden_size : config.hidden_size * 2 ] snake_case : int = in_proj_weight[ -config.hidden_size :, : ] snake_case : List[str] = in_proj_bias[-config.hidden_size :] def lowerCamelCase__ ( __lowercase , __lowercase , __lowercase ): snake_case : int = dct.pop(lowercase__ ) snake_case : str = val def lowerCamelCase__ ( ): snake_case : Optional[int] = 'http://images.cocodataset.org/val2017/000000039769.jpg' snake_case : Any = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw ) return im @torch.no_grad() def lowerCamelCase__ ( __lowercase , __lowercase ): snake_case : Tuple = DeiTConfig() # all deit models have fine-tuned heads snake_case : Optional[Any] = False # dataset (fine-tuned on ImageNet 2012), patch_size and image_size snake_case : List[Any] = 1_000 snake_case : str = 'huggingface/label-files' snake_case : Optional[int] = 'imagenet-1k-id2label.json' snake_case : Dict = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="""dataset""" ) , """r""" ) ) snake_case : Any = {int(lowercase__ ): v for k, v in idalabel.items()} snake_case : int = idalabel snake_case : Any = {v: k for k, v in idalabel.items()} snake_case : Any = int(deit_name[-6:-4] ) snake_case : List[Any] = int(deit_name[-3:] ) # size of the architecture if deit_name[9:].startswith("""tiny""" ): snake_case : Optional[int] = 192 snake_case : str = 768 snake_case : Optional[int] = 12 snake_case : Optional[Any] = 3 elif deit_name[9:].startswith("""small""" ): snake_case : List[Any] = 384 snake_case : Dict = 1_536 snake_case : Dict = 12 snake_case : List[str] = 6 if deit_name[9:].startswith("""base""" ): pass elif deit_name[4:].startswith("""large""" ): snake_case : List[str] = 1_024 snake_case : Union[str, Any] = 4_096 snake_case : List[str] = 24 snake_case : str = 16 # load original model from timm snake_case : Union[str, Any] = timm.create_model(lowercase__ , pretrained=lowercase__ ) timm_model.eval() # load state_dict of original model, remove and rename some keys snake_case : Optional[Any] = timm_model.state_dict() snake_case : Any = create_rename_keys(lowercase__ , lowercase__ ) for src, dest in rename_keys: rename_key(lowercase__ , lowercase__ , lowercase__ ) read_in_q_k_v(lowercase__ , lowercase__ , lowercase__ ) # load HuggingFace model snake_case : str = DeiTForImageClassificationWithTeacher(lowercase__ ).eval() model.load_state_dict(lowercase__ ) # Check outputs on an image, prepared by DeiTImageProcessor snake_case : Tuple = int( (256 / 224) * config.image_size ) # to maintain same ratio w.r.t. 224 images, see https://github.com/facebookresearch/deit/blob/ab5715372db8c6cad5740714b2216d55aeae052e/datasets.py#L103 snake_case : Optional[Any] = DeiTImageProcessor(size=lowercase__ , crop_size=config.image_size ) snake_case : List[Any] = image_processor(images=prepare_img() , return_tensors="""pt""" ) snake_case : Any = encoding['pixel_values'] snake_case : Tuple = model(lowercase__ ) snake_case : List[str] = timm_model(lowercase__ ) assert timm_logits.shape == outputs.logits.shape assert torch.allclose(lowercase__ , outputs.logits , atol=1e-3 ) Path(lowercase__ ).mkdir(exist_ok=lowercase__ ) print(F'''Saving model {deit_name} to {pytorch_dump_folder_path}''' ) model.save_pretrained(lowercase__ ) print(F'''Saving image processor to {pytorch_dump_folder_path}''' ) image_processor.save_pretrained(lowercase__ ) if __name__ == "__main__": lowercase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( """--deit_name""", default="""vit_deit_base_distilled_patch16_224""", type=str, help="""Name of the DeiT timm model you'd like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory.""" ) lowercase : Dict = parser.parse_args() convert_deit_checkpoint(args.deit_name, args.pytorch_dump_folder_path)
116
import logging import numpy as np import pytest from scipy.linalg import eigh logging.basicConfig(level=logging.INFO, format="%(message)s") def _a ( lowercase__ : np.ndarray ): '''simple docstring''' return input_array.reshape((input_array.size, 1) ) def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray , lowercase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = np.nan for i in range(lowercase__ ): SCREAMING_SNAKE_CASE__ : int = features[:, labels == i] SCREAMING_SNAKE_CASE__ : int = data.mean(1 ) # Centralize the data of class i SCREAMING_SNAKE_CASE__ : Optional[Any] = data - column_reshape(lowercase__ ) if i > 0: # If covariance_sum is not None covariance_sum += np.dot(lowercase__ , centered_data.T ) else: # If covariance_sum is np.nan (i.e. first loop) SCREAMING_SNAKE_CASE__ : Any = np.dot(lowercase__ , centered_data.T ) return covariance_sum / features.shape[1] def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray , lowercase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = features.mean(1 ) SCREAMING_SNAKE_CASE__ : List[str] = np.nan for i in range(lowercase__ ): SCREAMING_SNAKE_CASE__ : Tuple = features[:, labels == i] SCREAMING_SNAKE_CASE__ : int = data.shape[1] SCREAMING_SNAKE_CASE__ : List[Any] = data.mean(1 ) if i > 0: # If covariance_sum is not None covariance_sum += device_data * np.dot( column_reshape(lowercase__ ) - column_reshape(lowercase__ ) , (column_reshape(lowercase__ ) - column_reshape(lowercase__ )).T , ) else: # If covariance_sum is np.nan (i.e. first loop) SCREAMING_SNAKE_CASE__ : str = device_data * np.dot( column_reshape(lowercase__ ) - column_reshape(lowercase__ ) , (column_reshape(lowercase__ ) - column_reshape(lowercase__ )).T , ) return covariance_sum / features.shape[1] def _a ( lowercase__ : np.ndarray , lowercase__ : int ): '''simple docstring''' if features.any(): SCREAMING_SNAKE_CASE__ : Any = features.mean(1 ) # Center the dataset SCREAMING_SNAKE_CASE__ : Optional[Any] = features - np.reshape(lowercase__ , (data_mean.size, 1) ) SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(lowercase__ , centered_data.T ) / features.shape[1] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = np.linalg.eigh(lowercase__ ) # Take all the columns in the reverse order (-1), and then takes only the first SCREAMING_SNAKE_CASE__ : List[Any] = eigenvectors[:, ::-1][:, 0:dimensions] # Project the database on the new space SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.dot(filtered_eigenvectors.T , lowercase__ ) logging.info('Principal Component Analysis computed' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=lowercase__ ) logging.error('Dataset empty' ) raise AssertionError def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray , lowercase__ : int , lowercase__ : int ): '''simple docstring''' assert classes > dimensions # Check if features have been already loaded if features.any: SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = eigh( covariance_between_classes(lowercase__ , lowercase__ , lowercase__ ) , covariance_within_classes(lowercase__ , lowercase__ , lowercase__ ) , ) SCREAMING_SNAKE_CASE__ : Tuple = eigenvectors[:, ::-1][:, :dimensions] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = np.linalg.svd(lowercase__ ) SCREAMING_SNAKE_CASE__ : List[Any] = svd_matrix[:, 0:dimensions] SCREAMING_SNAKE_CASE__ : int = np.dot(filtered_svd_matrix.T , lowercase__ ) logging.info('Linear Discriminant Analysis computed' ) return projected_data else: logging.basicConfig(level=logging.ERROR , format='%(message)s' , force=lowercase__ ) logging.error('Dataset empty' ) raise AssertionError def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]] ) SCREAMING_SNAKE_CASE__ : Tuple = np.array([0, 0, 0, 1, 1] ) SCREAMING_SNAKE_CASE__ : str = 2 SCREAMING_SNAKE_CASE__ : Dict = 2 # Assert that the function raises an AssertionError if dimensions > classes with pytest.raises(lowercase__ ) as error_info: SCREAMING_SNAKE_CASE__ : Optional[int] = linear_discriminant_analysis( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) if isinstance(lowercase__ , np.ndarray ): raise AssertionError( 'Did not raise AssertionError for dimensions > classes' ) assert error_info.type is AssertionError def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]] ) SCREAMING_SNAKE_CASE__ : List[str] = 2 SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([[6.92820323, 8.66025404, 10.39230485], [3.0, 3.0, 3.0]] ) with pytest.raises(lowercase__ ) as error_info: SCREAMING_SNAKE_CASE__ : int = principal_component_analysis(lowercase__ , lowercase__ ) if not np.allclose(lowercase__ , lowercase__ ): raise AssertionError assert error_info.type is AssertionError if __name__ == "__main__": import doctest doctest.testmod()
85
0
import numpy as np import torch from torch.utils.data import Dataset from utils import logger class SCREAMING_SNAKE_CASE__ (UpperCamelCase_ ): def __init__( self : Optional[Any] , __lowerCamelCase : List[str] , __lowerCamelCase : List[str] ): """simple docstring""" lowerCAmelCase__ = params lowerCAmelCase__ = np.array(a_ ) lowerCAmelCase__ = np.array([len(a_ ) for t in data] ) self.check() self.remove_long_sequences() self.remove_empty_sequences() self.remove_unknown_sequences() self.check() self.print_statistics() def __getitem__( self : List[str] , __lowerCamelCase : Optional[Any] ): """simple docstring""" return (self.token_ids[index], self.lengths[index]) def __len__( self : str ): """simple docstring""" return len(self.lengths ) def A__ ( self : Optional[Any] ): """simple docstring""" assert len(self.token_ids ) == len(self.lengths ) assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) ) def A__ ( self : Optional[Any] ): """simple docstring""" lowerCAmelCase__ = self.params.max_model_input_size lowerCAmelCase__ = self.lengths > max_len logger.info(F"""Splitting {sum(a_ )} too long sequences.""" ) def divide_chunks(__lowerCamelCase : List[str] , __lowerCamelCase : Optional[Any] ): return [l[i : i + n] for i in range(0 , len(a_ ) , a_ )] lowerCAmelCase__ = [] lowerCAmelCase__ = [] if self.params.mlm: lowerCAmelCase__ = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token'] else: lowerCAmelCase__ = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token'] for seq_, len_ in zip(self.token_ids , self.lengths ): assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_ if len_ <= max_len: new_tok_ids.append(seq_ ) new_lengths.append(len_ ) else: lowerCAmelCase__ = [] for sub_s in divide_chunks(seq_ , max_len - 2 ): if sub_s[0] != cls_id: lowerCAmelCase__ = np.insert(a_ , 0 , a_ ) if sub_s[-1] != sep_id: lowerCAmelCase__ = np.insert(a_ , len(a_ ) , a_ ) assert len(a_ ) <= max_len assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s sub_seqs.append(a_ ) new_tok_ids.extend(a_ ) new_lengths.extend([len(a_ ) for l in sub_seqs] ) lowerCAmelCase__ = np.array(a_ ) lowerCAmelCase__ = np.array(a_ ) def A__ ( self : Union[str, Any] ): """simple docstring""" lowerCAmelCase__ = len(self ) lowerCAmelCase__ = self.lengths > 11 lowerCAmelCase__ = self.token_ids[indices] lowerCAmelCase__ = self.lengths[indices] lowerCAmelCase__ = len(self ) logger.info(F"""Remove {init_size - new_size} too short (<=11 tokens) sequences.""" ) def A__ ( self : Union[str, Any] ): """simple docstring""" if "unk_token" not in self.params.special_tok_ids: return else: lowerCAmelCase__ = self.params.special_tok_ids['unk_token'] lowerCAmelCase__ = len(self ) lowerCAmelCase__ = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] ) lowerCAmelCase__ = (unk_occs / self.lengths) < 0.5 lowerCAmelCase__ = self.token_ids[indices] lowerCAmelCase__ = self.lengths[indices] lowerCAmelCase__ = len(self ) logger.info(F"""Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).""" ) def A__ ( self : Optional[Any] ): """simple docstring""" if not self.params.is_master: return logger.info(F"""{len(self )} sequences""" ) # data_len = sum(self.lengths) # nb_unique_tokens = len(Counter(list(chain(*self.token_ids)))) # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)') # unk_idx = self.params.special_tok_ids['unk_token'] # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids]) # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)') def A__ ( self : List[Any] , __lowerCamelCase : List[str] ): """simple docstring""" lowerCAmelCase__ = [t[0] for t in batch] lowerCAmelCase__ = [t[1] for t in batch] assert len(a_ ) == len(a_ ) # Max for paddings lowerCAmelCase__ = max(a_ ) # Pad token ids if self.params.mlm: lowerCAmelCase__ = self.params.special_tok_ids['pad_token'] else: lowerCAmelCase__ = self.params.special_tok_ids['unk_token'] lowerCAmelCase__ = [list(t.astype(a_ ) ) + [pad_idx] * (max_seq_len_ - len(a_ )) for t in token_ids] assert len(tk_ ) == len(a_ ) assert all(len(a_ ) == max_seq_len_ for t in tk_ ) lowerCAmelCase__ = torch.tensor(tk_ ) # (bs, max_seq_len_) lowerCAmelCase__ = torch.tensor(a_ ) # (bs) return tk_t, lg_t
615
import argparse import logging from collections import namedtuple import torch from model_bertabs import BertAbsSummarizer from models.model_builder import AbsSummarizer # The authors' implementation from transformers import BertTokenizer logging.basicConfig(level=logging.INFO) SCREAMING_SNAKE_CASE__ : Optional[int] = logging.getLogger(__name__) SCREAMING_SNAKE_CASE__ : List[Any] = "Hello world! cécé herlolip" SCREAMING_SNAKE_CASE__ : Dict = namedtuple( "BertAbsConfig", [ "temp_dir", "large", "use_bert_emb", "finetune_bert", "encoder", "share_emb", "max_pos", "enc_layers", "enc_hidden_size", "enc_heads", "enc_ff_size", "enc_dropout", "dec_layers", "dec_hidden_size", "dec_heads", "dec_ff_size", "dec_dropout", ], ) def _a ( lowercase__ : List[str] , lowercase__ : List[Any] ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = BertAbsConfig( temp_dir='.' , finetune_bert=lowercase__ , large=lowercase__ , share_emb=lowercase__ , use_bert_emb=lowercase__ , encoder='bert' , max_pos=5_12 , enc_layers=6 , enc_hidden_size=5_12 , enc_heads=8 , enc_ff_size=5_12 , enc_dropout=0.2 , dec_layers=6 , dec_hidden_size=7_68 , dec_heads=8 , dec_ff_size=20_48 , dec_dropout=0.2 , ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.load(lowercase__ , lambda lowercase__ , lowercase__ : storage ) SCREAMING_SNAKE_CASE__ : Any = AbsSummarizer(lowercase__ , torch.device('cpu' ) , lowercase__ ) original.eval() SCREAMING_SNAKE_CASE__ : List[Any] = BertAbsSummarizer(lowercase__ , torch.device('cpu' ) ) new_model.eval() # ------------------- # Convert the weights # ------------------- logging.info('convert the model' ) new_model.bert.load_state_dict(original.bert.state_dict() ) new_model.decoder.load_state_dict(original.decoder.state_dict() ) new_model.generator.load_state_dict(original.generator.state_dict() ) # ---------------------------------- # Make sure the outpus are identical # ---------------------------------- logging.info('Make sure that the models\' outputs are identical' ) SCREAMING_SNAKE_CASE__ : Any = BertTokenizer.from_pretrained('bert-base-uncased' ) # prepare the model inputs SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.encode('This is sample éàalj\'-.' ) encoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowercase__ )) ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor(lowercase__ ).unsqueeze(0 ) SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode('This is sample 3 éàalj\'-.' ) decoder_input_ids.extend([tokenizer.pad_token_id] * (5_12 - len(lowercase__ )) ) SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor(lowercase__ ).unsqueeze(0 ) # failsafe to make sure the weights reset does not affect the # loaded weights. assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight ) ) == 0 # forward pass SCREAMING_SNAKE_CASE__ : int = encoder_input_ids SCREAMING_SNAKE_CASE__ : Any = decoder_input_ids SCREAMING_SNAKE_CASE__ : Union[str, Any] = None SCREAMING_SNAKE_CASE__ : Dict = None SCREAMING_SNAKE_CASE__ : str = None SCREAMING_SNAKE_CASE__ : List[str] = None SCREAMING_SNAKE_CASE__ : Optional[Any] = None # The original model does not apply the geneator layer immediatly but rather in # the beam search (where it combines softmax + linear layer). Since we already # apply the softmax in our generation process we only apply the linear layer here. # We make sure that the outputs of the full stack are identical SCREAMING_SNAKE_CASE__ : Optional[Any] = original(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )[0] SCREAMING_SNAKE_CASE__ : Optional[int] = original.generator(lowercase__ ) SCREAMING_SNAKE_CASE__ : Tuple = new_model( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ )[0] SCREAMING_SNAKE_CASE__ : List[Any] = new_model.generator(lowercase__ ) SCREAMING_SNAKE_CASE__ : Tuple = torch.max(torch.abs(output_converted_model - output_original_model ) ).item() print('Maximum absolute difference beween weights: {:.2f}'.format(lowercase__ ) ) SCREAMING_SNAKE_CASE__ : Optional[int] = torch.max(torch.abs(output_converted_generator - output_original_generator ) ).item() print('Maximum absolute difference beween weights: {:.2f}'.format(lowercase__ ) ) SCREAMING_SNAKE_CASE__ : List[Any] = torch.allclose(lowercase__ , lowercase__ , atol=1E-3 ) if are_identical: logging.info('all weights are equal up to 1e-3' ) else: raise ValueError('the weights are different. The new model is likely different from the original one.' ) # The model has been saved with torch.save(model) and this is bound to the exact # directory structure. We save the state_dict instead. logging.info('saving the model\'s state dictionary' ) torch.save( new_model.state_dict() , './bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin' ) if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Tuple = argparse.ArgumentParser() parser.add_argument( "--bertabs_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump.", ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model.", ) SCREAMING_SNAKE_CASE__ : Tuple = parser.parse_args() convert_bertabs_checkpoints( args.bertabs_checkpoint_path, args.pytorch_dump_folder_path, )
85
0
"""simple docstring""" import collections import tempfile import unittest import numpy as np from transformers.testing_utils import ( is_pt_flax_cross_test, require_flax, require_torch, require_vision, slow, torch_device, ) from transformers.utils import is_flax_available, is_torch_available, is_vision_available from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_flax_bert import FlaxBertModelTester from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester from ..vit.test_modeling_flax_vit import FlaxViTModelTester if is_flax_available(): from transformers import ( FlaxBertModel, FlaxCLIPVisionModel, FlaxVisionTextDualEncoderModel, FlaxViTModel, VisionTextDualEncoderConfig, VisionTextDualEncoderProcessor, ) from transformers.modeling_flax_pytorch_utils import ( convert_pytorch_state_dict_to_flax, load_flax_weights_in_pytorch_model, ) if is_torch_available(): import torch from transformers import VisionTextDualEncoderModel if is_vision_available(): from PIL import Image def _snake_case ( _snake_case : Dict ) -> int: '''simple docstring''' if isinstance(lowercase__ , collections.abc.Iterable ): return x return (x, x) @require_flax class lowercase_ : '''simple docstring''' def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] ): pass def lowerCAmelCase_ ( self : Union[str, Any] ): pass def lowerCAmelCase_ ( self : Union[str, Any] ): pass def lowerCAmelCase_ ( self : Any , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : np.ndarray , _UpperCAmelCase : float ): _A = np.abs((a - b) ).max() self.assertLessEqual(a_ , a_ , F'''Difference between torch and flax is {diff} (>= {tol}).''' ) def lowerCAmelCase_ ( self : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : List[Any]=None , **_UpperCAmelCase : Optional[int] ): _A = VisionTextDualEncoderConfig.from_vision_text_configs(a_ , a_ ) _A = FlaxVisionTextDualEncoderModel(a_ ) _A = model(input_ids=a_ , pixel_values=a_ , attention_mask=a_ ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) ) def lowerCAmelCase_ ( self : str , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int]=None , **_UpperCAmelCase : List[Any] ): _A = self.get_vision_text_model(a_ , a_ ) _A = {'vision_model': vision_model, 'text_model': text_model} _A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**a_ ) _A = model(input_ids=a_ , pixel_values=a_ , attention_mask=a_ ) self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) ) def lowerCAmelCase_ ( self : Dict , _UpperCAmelCase : Any , _UpperCAmelCase : Any , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : int=None , **_UpperCAmelCase : Any ): _A = self.get_vision_text_model(a_ , a_ ) _A = {'vision_model': vision_model, 'text_model': text_model} _A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**a_ ) _A = model(input_ids=a_ , pixel_values=a_ , attention_mask=a_ ) _A = output[0] with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(a_ ) _A = FlaxVisionTextDualEncoderModel.from_pretrained(a_ ) _A = model(input_ids=a_ , pixel_values=a_ , attention_mask=a_ ) _A = after_output[0] _A = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(a_ , 1E-3 ) def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] , _UpperCAmelCase : int , _UpperCAmelCase : int=None , **_UpperCAmelCase : Union[str, Any] ): _A = self.get_vision_text_model(a_ , a_ ) _A = {'vision_model': vision_model, 'text_model': text_model} _A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**a_ ) _A = model( input_ids=a_ , pixel_values=a_ , attention_mask=a_ , output_attentions=a_ ) _A = output.vision_model_output.attentions self.assertEqual(len(a_ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) _A = to_atuple(vision_model.config.image_size ) _A = to_atuple(vision_model.config.patch_size ) _A = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) _A = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) _A = output.text_model_output.attentions self.assertEqual(len(a_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[str] ): pt_model.to(a_ ) pt_model.eval() # prepare inputs _A = inputs_dict _A = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()} with torch.no_grad(): _A = pt_model(**a_ ).to_tuple() _A = fx_model(**a_ ).to_tuple() self.assertEqual(len(a_ ) , len(a_ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output in zip(fx_outputs[:4] , pt_outputs[:4] ): self.assert_almost_equals(a_ , pt_output.numpy() , 4E-2 ) # PT -> Flax with tempfile.TemporaryDirectory() as tmpdirname: pt_model.save_pretrained(a_ ) _A = FlaxVisionTextDualEncoderModel.from_pretrained(a_ , from_pt=a_ ) _A = fx_model_loaded(**a_ ).to_tuple() self.assertEqual(len(a_ ) , len(a_ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] , pt_outputs[:4] ): self.assert_almost_equals(a_ , pt_output.numpy() , 4E-2 ) # Flax -> PT with tempfile.TemporaryDirectory() as tmpdirname: fx_model.save_pretrained(a_ ) _A = VisionTextDualEncoderModel.from_pretrained(a_ , from_flax=a_ ) pt_model_loaded.to(a_ ) pt_model_loaded.eval() with torch.no_grad(): _A = pt_model_loaded(**a_ ).to_tuple() self.assertEqual(len(a_ ) , len(a_ ) , 'Output lengths differ between Flax and PyTorch' ) for fx_output, pt_output_loaded in zip(fx_outputs[:4] , pt_outputs_loaded[:4] ): self.assert_almost_equals(a_ , pt_output_loaded.numpy() , 4E-2 ) def lowerCAmelCase_ ( self : int , _UpperCAmelCase : List[str] , _UpperCAmelCase : str , _UpperCAmelCase : Tuple ): _A = VisionTextDualEncoderConfig.from_vision_text_configs(a_ , a_ ) _A = VisionTextDualEncoderModel(a_ ) _A = FlaxVisionTextDualEncoderModel(a_ ) _A = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , a_ ) _A = fx_state self.check_pt_flax_equivalence(a_ , a_ , a_ ) def lowerCAmelCase_ ( self : int , _UpperCAmelCase : List[Any] , _UpperCAmelCase : int , _UpperCAmelCase : List[Any] ): _A = VisionTextDualEncoderConfig.from_vision_text_configs(a_ , a_ ) _A = VisionTextDualEncoderModel(a_ ) _A = FlaxVisionTextDualEncoderModel(a_ ) _A = load_flax_weights_in_pytorch_model(a_ , fx_model.params ) self.check_pt_flax_equivalence(a_ , a_ , a_ ) def lowerCAmelCase_ ( self : Union[str, Any] ): _A = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**a_ ) def lowerCAmelCase_ ( self : Any ): _A = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**a_ ) def lowerCAmelCase_ ( self : str ): _A = self.prepare_config_and_inputs() self.check_save_load(**a_ ) def lowerCAmelCase_ ( self : int ): _A = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**a_ ) @is_pt_flax_cross_test def lowerCAmelCase_ ( self : int ): _A = self.prepare_config_and_inputs() _A = config_inputs_dict.pop('vision_config' ) _A = config_inputs_dict.pop('text_config' ) _A = config_inputs_dict self.check_equivalence_pt_to_flax(a_ , a_ , a_ ) self.check_equivalence_flax_to_pt(a_ , a_ , a_ ) @slow def lowerCAmelCase_ ( self : str ): _A = self.get_pretrained_model_and_inputs() _A = model_a(**a_ ) _A = outputs[0] with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(a_ ) _A = FlaxVisionTextDualEncoderModel.from_pretrained(a_ ) _A = model_a(**a_ ) _A = after_outputs[0] _A = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(a_ , 1E-5 ) @require_flax class lowercase_ ( UpperCamelCase_ , unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : Union[str, Any] ): _A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-bert' , vision_from_pt=a_ , text_from_pt=a_ , ) _A = 13 _A = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) _A = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) _A = random_attention_mask([batch_size, 4] ) _A = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def lowerCAmelCase_ ( self : List[str] , _UpperCAmelCase : Any , _UpperCAmelCase : Tuple ): _A = FlaxViTModel(a_ ) _A = FlaxBertModel(a_ ) return vision_model, text_model def lowerCAmelCase_ ( self : Union[str, Any] ): _A = FlaxViTModelTester(self ) _A = FlaxBertModelTester(self ) _A = vit_model_tester.prepare_config_and_inputs() _A = bert_model_tester.prepare_config_and_inputs() _A = vision_config_and_inputs _A = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_torch class lowercase_ ( UpperCamelCase_ , unittest.TestCase ): '''simple docstring''' def lowerCAmelCase_ ( self : List[Any] ): _A = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained( 'hf-internal-testing/tiny-random-clip' , 'hf-internal-testing/tiny-bert' , vision_from_pt=a_ , text_from_pt=a_ , ) _A = 13 _A = floats_tensor( [ batch_size, model.config.vision_config.num_channels, model.config.vision_config.image_size, model.config.vision_config.image_size, ] ) _A = ids_tensor([batch_size, 4] , model.config.text_config.vocab_size ) _A = random_attention_mask([batch_size, 4] ) _A = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask} return model, inputs def lowerCAmelCase_ ( self : str , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : str ): _A = FlaxCLIPVisionModel(a_ ) _A = FlaxBertModel(a_ ) return vision_model, text_model def lowerCAmelCase_ ( self : str ): _A = FlaxCLIPVisionModelTester(self ) _A = FlaxBertModelTester(self ) _A = clip_model_tester.prepare_config_and_inputs() _A = bert_model_tester.prepare_config_and_inputs() _A = vision_config_and_inputs _A = text_config_and_inputs # make sure that cross attention layers are added return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": attention_mask, "input_ids": input_ids, "token_type_ids": token_type_ids, } @require_flax @require_vision class lowercase_ ( unittest.TestCase ): '''simple docstring''' @slow def lowerCAmelCase_ ( self : Optional[int] ): _A = FlaxVisionTextDualEncoderModel.from_pretrained('clip-italian/clip-italian' , logit_scale_init_value=1.0 ) _A = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' ) _A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) _A = processor( text=['una foto di un gatto', 'una foto di un cane'] , images=a_ , padding=a_ , return_tensors='np' ) _A = model(**a_ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) _A = np.array([[1.228_4727, 0.310_4122]] ) self.assertTrue(np.allclose(outputs.logits_per_image , a_ , atol=1E-3 ) )
7
from __future__ import annotations import inspect import unittest from typing import List, Tuple from transformers import RegNetConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFRegNetForImageClassification, TFRegNetModel if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class snake_case : def __init__( self : Tuple , a_ : int , a_ : Optional[int]=3 , a_ : Tuple=32 , a_ : Any=3 , a_ : Tuple=10 , a_ : Optional[int]=[10, 20, 30, 40] , a_ : List[Any]=[1, 1, 2, 1] , a_ : int=True , a_ : Optional[Any]=True , a_ : Any="relu" , a_ : int=3 , a_ : List[Any]=None , )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = parent SCREAMING_SNAKE_CASE__ : Optional[int] = batch_size SCREAMING_SNAKE_CASE__ : int = image_size SCREAMING_SNAKE_CASE__ : Tuple = num_channels SCREAMING_SNAKE_CASE__ : Tuple = embeddings_size SCREAMING_SNAKE_CASE__ : str = hidden_sizes SCREAMING_SNAKE_CASE__ : Optional[int] = depths SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_labels SCREAMING_SNAKE_CASE__ : Dict = hidden_act SCREAMING_SNAKE_CASE__ : Tuple = num_labels SCREAMING_SNAKE_CASE__ : List[Any] = scope SCREAMING_SNAKE_CASE__ : str = len(a_ ) def __lowercase( self : Union[str, Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) SCREAMING_SNAKE_CASE__ : Any = None if self.use_labels: SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size] , self.num_labels ) SCREAMING_SNAKE_CASE__ : Tuple = self.get_config() return config, pixel_values, labels def __lowercase( self : str )-> str: """simple docstring""" return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def __lowercase( self : List[str] , a_ : int , a_ : Any , a_ : Optional[Any] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = TFRegNetModel(config=a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(a_ , training=a_ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , ) def __lowercase( self : Union[str, Any] , a_ : Dict , a_ : int , a_ : Optional[Any] )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = self.num_labels SCREAMING_SNAKE_CASE__ : Tuple = TFRegNetForImageClassification(a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = model(a_ , labels=a_ , training=a_ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __lowercase( self : List[str] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.prepare_config_and_inputs() SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = config_and_inputs SCREAMING_SNAKE_CASE__ : Optional[Any] = {'pixel_values': pixel_values} return config, inputs_dict @require_tf class snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase_ = (TFRegNetModel, TFRegNetForImageClassification) if is_tf_available() else () lowercase_ = ( {'feature-extraction': TFRegNetModel, 'image-classification': TFRegNetForImageClassification} if is_tf_available() else {} ) lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False lowercase_ = False def __lowercase( self : int )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = TFRegNetModelTester(self ) SCREAMING_SNAKE_CASE__ : int = ConfigTester(self , config_class=a_ , has_text_modality=a_ ) def __lowercase( self : List[Any] )-> Tuple: """simple docstring""" return @unittest.skip(reason='RegNet does not use inputs_embeds' ) def __lowercase( self : str )-> Optional[int]: """simple docstring""" pass @unittest.skipIf( not is_tf_available() or len(tf.config.list_physical_devices('GPU' ) ) == 0 , reason='TF does not support backprop for grouped convolutions on CPU.' , ) @slow def __lowercase( self : Any )-> List[Any]: """simple docstring""" super().test_keras_fit() @unittest.skip(reason='RegNet does not support input and output embeddings' ) def __lowercase( self : Any )-> List[Any]: """simple docstring""" pass def __lowercase( self : Tuple )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = inspect.signature(model.call ) # signature.parameters is an OrderedDict => so arg_names order is deterministic SCREAMING_SNAKE_CASE__ : List[Any] = [*signature.parameters.keys()] SCREAMING_SNAKE_CASE__ : Optional[int] = ['pixel_values'] self.assertListEqual(arg_names[:1] , a_ ) def __lowercase( self : str )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*a_ ) def __lowercase( self : List[Any] )-> Optional[Any]: """simple docstring""" def check_hidden_states_output(a_ : int , a_ : Union[str, Any] , a_ : Tuple ): SCREAMING_SNAKE_CASE__ : Any = model_class(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**self._prepare_for_class(a_ , a_ ) , training=a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(a_ ) , expected_num_stages + 1 ) # RegNet's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common() SCREAMING_SNAKE_CASE__ : Dict = ['basic', 'bottleneck'] for model_class in self.all_model_classes: for layer_type in layers_type: SCREAMING_SNAKE_CASE__ : List[Any] = layer_type SCREAMING_SNAKE_CASE__ : Union[str, Any] = True check_hidden_states_output(a_ , a_ , a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] SCREAMING_SNAKE_CASE__ : int = True check_hidden_states_output(a_ , a_ , a_ ) def __lowercase( self : Optional[int] )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common() def check_equivalence(a_ : str , a_ : Tuple , a_ : Optional[int] , a_ : Union[str, Any]={} ): SCREAMING_SNAKE_CASE__ : int = model(a_ , return_dict=a_ , **a_ ) SCREAMING_SNAKE_CASE__ : str = model(a_ , return_dict=a_ , **a_ ).to_tuple() def recursive_check(a_ : List[Any] , a_ : int ): if isinstance(a_ , (List, Tuple) ): for tuple_iterable_value, dict_iterable_value in zip(a_ , a_ ): recursive_check(a_ , a_ ) elif tuple_object is None: return else: self.assertTrue( all(tf.equal(a_ , a_ ) ) , msg=( 'Tuple and dict output are not equal. Difference:' F''' {tf.math.reduce_max(tf.abs(tuple_object - dict_object ) )}''' ) , ) recursive_check(a_ , a_ ) for model_class in self.all_model_classes: SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(a_ ) SCREAMING_SNAKE_CASE__ : int = self._prepare_for_class(a_ , a_ ) SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(a_ , a_ ) check_equivalence(a_ , a_ , a_ ) SCREAMING_SNAKE_CASE__ : List[str] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) check_equivalence(a_ , a_ , a_ ) SCREAMING_SNAKE_CASE__ : str = self._prepare_for_class(a_ , a_ ) SCREAMING_SNAKE_CASE__ : List[str] = self._prepare_for_class(a_ , a_ ) check_equivalence(a_ , a_ , a_ , {'output_hidden_states': True} ) SCREAMING_SNAKE_CASE__ : int = self._prepare_for_class(a_ , a_ , return_labels=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._prepare_for_class(a_ , a_ , return_labels=a_ ) check_equivalence(a_ , a_ , a_ , {'output_hidden_states': True} ) def __lowercase( self : str )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*a_ ) @slow def __lowercase( self : Any )-> List[str]: """simple docstring""" for model_name in TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: SCREAMING_SNAKE_CASE__ : Optional[int] = TFRegNetModel.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ) return image @require_tf @require_vision class snake_case ( unittest.TestCase ): @cached_property def __lowercase( self : List[Any] )-> int: """simple docstring""" return ( AutoImageProcessor.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __lowercase( self : Any )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = TFRegNetForImageClassification.from_pretrained(TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) SCREAMING_SNAKE_CASE__ : List[Any] = self.default_image_processor SCREAMING_SNAKE_CASE__ : Any = prepare_img() SCREAMING_SNAKE_CASE__ : str = image_processor(images=a_ , return_tensors='tf' ) # forward pass SCREAMING_SNAKE_CASE__ : Tuple = model(**a_ , training=a_ ) # verify the logits SCREAMING_SNAKE_CASE__ : Optional[int] = tf.TensorShape((1, 1000) ) self.assertEqual(outputs.logits.shape , a_ ) SCREAMING_SNAKE_CASE__ : Any = tf.constant([-0.4180, -1.5051, -3.4836] ) tf.debugging.assert_near(outputs.logits[0, :3] , a_ , atol=1e-4 )
85
0
from sympy import diff, lambdify, symbols from sympy.functions import * # noqa: F403 def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : complex , SCREAMING_SNAKE_CASE__ : str = "x" , SCREAMING_SNAKE_CASE__ : float = 10**-10 , SCREAMING_SNAKE_CASE__ : int = 1 , ) -> List[str]: '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = symbols(lowercase__ ) SCREAMING_SNAKE_CASE__ : str = lambdify(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = lambdify(lowercase__ , diff(lowercase__ , lowercase__ ) ) SCREAMING_SNAKE_CASE__ : Optional[int] = starting_point while True: if diff_function(lowercase__ ) != 0: SCREAMING_SNAKE_CASE__ : Optional[Any] = prev_guess - multiplicity * func(lowercase__ ) / diff_function( lowercase__ ) else: raise ZeroDivisionError("Could not find root" ) from None # Precision is checked by comparing the difference of consecutive guesses if abs(next_guess - prev_guess ) < precision: return next_guess SCREAMING_SNAKE_CASE__ : Optional[int] = next_guess # Let's Execute if __name__ == "__main__": # Find root of trigonometric function # Find value of pi print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}") # Find root of polynomial # Find fourth Root of 5 print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}") # Find value of e print( '''The root of log(y) - 1 = 0 is ''', f"{newton_raphson('log(y) - 1', 2, variable='y')}", ) # Exponential Roots print( '''The root of exp(x) - 1 = 0 is''', f"{newton_raphson('exp(x) - 1', 1_0, precision=0.005)}", ) # Find root of cos(x) print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
663
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) SCREAMING_SNAKE_CASE__ : Optional[Any] = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[Any] = ["FNetTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[str] = ["FNetTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Tuple = [ "FNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FNetForMaskedLM", "FNetForMultipleChoice", "FNetForNextSentencePrediction", "FNetForPreTraining", "FNetForQuestionAnswering", "FNetForSequenceClassification", "FNetForTokenClassification", "FNetLayer", "FNetModel", "FNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
0
"""simple docstring""" import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig _SCREAMING_SNAKE_CASE : Optional[int] = logging.get_logger(__name__) _SCREAMING_SNAKE_CASE : Optional[Any] = "T5Config" def lowerCamelCase__ ( _lowerCamelCase : jnp.array , _lowerCamelCase : int , _lowerCamelCase : int ) -> str: lowerCamelCase_ = jnp.zeros_like(lowercase__ ) lowerCamelCase_ = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) lowerCamelCase_ = shifted_input_ids.at[:, 0].set(lowercase__ ) lowerCamelCase_ = jnp.where(shifted_input_ids == -100 , lowercase__ , lowercase__ ) return shifted_input_ids class a ( UpperCamelCase_ ): SCREAMING_SNAKE_CASE : Optional[Any] = """mt5""" SCREAMING_SNAKE_CASE : List[str] = MTaConfig class a ( UpperCamelCase_ ): SCREAMING_SNAKE_CASE : Tuple = """mt5""" SCREAMING_SNAKE_CASE : Any = MTaConfig class a ( UpperCamelCase_ ): SCREAMING_SNAKE_CASE : Tuple = """mt5""" SCREAMING_SNAKE_CASE : Any = MTaConfig
549
def _a ( lowercase__ : int , lowercase__ : list ): '''simple docstring''' _enforce_args(lowercase__ , lowercase__ ) if n == 0: return 0 SCREAMING_SNAKE_CASE__ : str = float('-inf' ) for i in range(1 , n + 1 ): SCREAMING_SNAKE_CASE__ : int = max( lowercase__ , prices[i - 1] + naive_cut_rod_recursive(n - i , lowercase__ ) ) return max_revue def _a ( lowercase__ : int , lowercase__ : list ): '''simple docstring''' _enforce_args(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE__ : str = [float('-inf' ) for _ in range(n + 1 )] return _top_down_cut_rod_recursive(lowercase__ , lowercase__ , lowercase__ ) def _a ( lowercase__ : int , lowercase__ : list , lowercase__ : list ): '''simple docstring''' if max_rev[n] >= 0: return max_rev[n] elif n == 0: return 0 else: SCREAMING_SNAKE_CASE__ : List[str] = float('-inf' ) for i in range(1 , n + 1 ): SCREAMING_SNAKE_CASE__ : Any = max( lowercase__ , prices[i - 1] + _top_down_cut_rod_recursive(n - i , lowercase__ , lowercase__ ) , ) SCREAMING_SNAKE_CASE__ : Tuple = max_revenue return max_rev[n] def _a ( lowercase__ : int , lowercase__ : list ): '''simple docstring''' _enforce_args(lowercase__ , lowercase__ ) # length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of # length 0. SCREAMING_SNAKE_CASE__ : Optional[int] = [float('-inf' ) for _ in range(n + 1 )] SCREAMING_SNAKE_CASE__ : int = 0 for i in range(1 , n + 1 ): SCREAMING_SNAKE_CASE__ : Optional[Any] = max_rev[i] for j in range(1 , i + 1 ): SCREAMING_SNAKE_CASE__ : Union[str, Any] = max(lowercase__ , prices[j - 1] + max_rev[i - j] ) SCREAMING_SNAKE_CASE__ : Dict = max_revenue_i return max_rev[n] def _a ( lowercase__ : int , lowercase__ : list ): '''simple docstring''' if n < 0: SCREAMING_SNAKE_CASE__ : Tuple = f'''n must be greater than or equal to 0. Got n = {n}''' raise ValueError(lowercase__ ) if n > len(lowercase__ ): SCREAMING_SNAKE_CASE__ : Tuple = ( 'Each integral piece of rod must have a corresponding price. ' f'''Got n = {n} but length of prices = {len(lowercase__ )}''' ) raise ValueError(lowercase__ ) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = [6, 10, 12, 15, 20, 23] SCREAMING_SNAKE_CASE__ : Optional[int] = len(lowercase__ ) # the best revenue comes from cutting the rod into 6 pieces, each # of length 1 resulting in a revenue of 6 * 6 = 36. SCREAMING_SNAKE_CASE__ : Optional[Any] = 36 SCREAMING_SNAKE_CASE__ : Tuple = top_down_cut_rod(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE__ : Optional[int] = bottom_up_cut_rod(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE__ : List[str] = naive_cut_rod_recursive(lowercase__ , lowercase__ ) assert expected_max_revenue == max_rev_top_down assert max_rev_top_down == max_rev_bottom_up assert max_rev_bottom_up == max_rev_naive if __name__ == "__main__": main()
85
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_tokenizers_available, is_torch_available, ) lowerCamelCase_ = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ["FNetTokenizer"] try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = ["FNetTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCamelCase_ = [ "FNET_PRETRAINED_MODEL_ARCHIVE_LIST", "FNetForMaskedLM", "FNetForMultipleChoice", "FNetForNextSentencePrediction", "FNetForPreTraining", "FNetForQuestionAnswering", "FNetForSequenceClassification", "FNetForTokenClassification", "FNetLayer", "FNetModel", "FNetPreTrainedModel", ] if TYPE_CHECKING: from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet import FNetTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_fnet_fast import FNetTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_fnet import ( FNET_PRETRAINED_MODEL_ARCHIVE_LIST, FNetForMaskedLM, FNetForMultipleChoice, FNetForNextSentencePrediction, FNetForPreTraining, FNetForQuestionAnswering, FNetForSequenceClassification, FNetForTokenClassification, FNetLayer, FNetModel, FNetPreTrainedModel, ) else: import sys lowerCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
498
import unittest from transformers import CamembertTokenizer, CamembertTokenizerFast from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from transformers.utils import is_torch_available from ...test_tokenization_common import TokenizerTesterMixin SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_tests_dir("fixtures/test_sentencepiece.model") SCREAMING_SNAKE_CASE__ : Optional[int] = get_tests_dir("fixtures/test_sentencepiece_bpe.model") SCREAMING_SNAKE_CASE__ : Any = "pt" if is_torch_available() else "tf" @require_sentencepiece @require_tokenizers class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = CamembertTokenizer lowercase_ = CamembertTokenizerFast lowercase_ = True lowercase_ = True def __lowercase( self : Tuple )-> str: """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing SCREAMING_SNAKE_CASE__ : Dict = CamembertTokenizer(a_ ) tokenizer.save_pretrained(self.tmpdirname ) def __lowercase( self : Any )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = '<pad>' SCREAMING_SNAKE_CASE__ : int = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(a_ ) , a_ ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(a_ ) , a_ ) def __lowercase( self : Optional[Any] )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = list(self.get_tokenizer().get_vocab().keys() ) self.assertEqual(vocab_keys[0] , '<s>NOTUSED' ) self.assertEqual(vocab_keys[1] , '<pad>' ) self.assertEqual(vocab_keys[-1] , '<mask>' ) self.assertEqual(len(a_ ) , 1004 ) def __lowercase( self : Union[str, Any] )-> Optional[Any]: """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 1005 ) def __lowercase( self : List[Any] )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = CamembertTokenizer(a_ ) tokenizer.save_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : int = CamembertTokenizerFast.from_pretrained(self.tmpdirname ) SCREAMING_SNAKE_CASE__ : str = 'I was born in 92000, and this is falsé.' SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = rust_tokenizer.encode(a_ ) self.assertListEqual(a_ , a_ ) SCREAMING_SNAKE_CASE__ : str = tokenizer.encode(a_ , add_special_tokens=a_ ) SCREAMING_SNAKE_CASE__ : List[str] = rust_tokenizer.encode(a_ , add_special_tokens=a_ ) self.assertListEqual(a_ , a_ ) # <unk> tokens are not the same for `rust` than for `slow`. # Because spm gives back raw token instead of `unk` in EncodeAsPieces # tokens = tokenizer.tokenize(sequence) SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.convert_ids_to_tokens(a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = rust_tokenizer.tokenize(a_ ) self.assertListEqual(a_ , a_ ) def __lowercase( self : Union[str, Any] )-> str: """simple docstring""" if not self.test_rust_tokenizer: return SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_tokenizer() SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE__ : Tuple = 'I was born in 92000, and this is falsé.' SCREAMING_SNAKE_CASE__ : str = tokenizer.tokenize(a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = rust_tokenizer.tokenize(a_ ) self.assertListEqual(a_ , a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.encode(a_ , add_special_tokens=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = rust_tokenizer.encode(a_ , add_special_tokens=a_ ) self.assertListEqual(a_ , a_ ) SCREAMING_SNAKE_CASE__ : int = self.get_rust_tokenizer() SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.encode(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = rust_tokenizer.encode(a_ ) self.assertListEqual(a_ , a_ ) @slow def __lowercase( self : List[str] )-> Dict: """simple docstring""" # fmt: off SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501 # fmt: on # camembert is a french model. So we also use french texts. SCREAMING_SNAKE_CASE__ : str = [ 'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, ' 'utilisé principalement dans le domaine du traitement automatique des langues (TAL).', 'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus ' 'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches ' 'telles que la traduction et la synthèse de texte.', ] self.tokenizer_integration_test_util( expected_encoding=a_ , model_name='camembert-base' , revision='3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf' , sequences=a_ , )
85
0
'''simple docstring''' import glob import os import random from string import ascii_lowercase, digits import cva A__ : List[Any] = "" A__ : Optional[int] = "" A__ : Optional[Any] = "" A__ : int = 1 # (0 is vertical, 1 is horizontal) def a_ ( ) -> Any: __snake_case : Tuple = get_dataset(lowercase__ ,lowercase__ ) print('Processing...' ) __snake_case : Tuple = update_image_and_anno(lowercase__ ,lowercase__ ,lowercase__ ) for index, image in enumerate(lowercase__ ): # Get random string code: '7b7ad245cdff75241935e4dd860f3bad' __snake_case : Optional[Any] = random_chars(32 ) __snake_case : Optional[Any] = paths[index].split(os.sep )[-1].rsplit('.' ,1 )[0] __snake_case : Union[str, Any] = f'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}''' cva.imwrite(f'''/{file_root}.jpg''' ,lowercase__ ,[cva.IMWRITE_JPEG_QUALITY, 85] ) print(f'''Success {index+1}/{len(lowercase__ )} with {file_name}''' ) __snake_case : str = [] for anno in new_annos[index]: __snake_case : Optional[int] = f'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}''' annos_list.append(lowercase__ ) with open(f'''/{file_root}.txt''' ,'w' ) as outfile: outfile.write('\n'.join(line for line in annos_list ) ) def a_ ( _UpperCAmelCase : str ,_UpperCAmelCase : str ) -> int: __snake_case : Optional[Any] = [] __snake_case : Tuple = [] for label_file in glob.glob(os.path.join(lowercase__ ,'*.txt' ) ): __snake_case : Dict = label_file.split(os.sep )[-1].rsplit('.' ,1 )[0] with open(lowercase__ ) as in_file: __snake_case : Optional[Any] = in_file.readlines() __snake_case : Tuple = os.path.join(lowercase__ ,f'''{label_name}.jpg''' ) __snake_case : str = [] for obj_list in obj_lists: __snake_case : List[Any] = obj_list.rstrip('\n' ).split(' ' ) boxes.append( [ int(obj[0] ), float(obj[1] ), float(obj[2] ), float(obj[3] ), float(obj[4] ), ] ) if not boxes: continue img_paths.append(lowercase__ ) labels.append(lowercase__ ) return img_paths, labels def a_ ( _UpperCAmelCase : list ,_UpperCAmelCase : list ,_UpperCAmelCase : int = 1 ) -> Tuple: __snake_case : str = [] __snake_case : str = [] __snake_case : int = [] for idx in range(len(lowercase__ ) ): __snake_case : int = [] __snake_case : Tuple = img_list[idx] path_list.append(lowercase__ ) __snake_case : Any = anno_list[idx] __snake_case : List[str] = cva.imread(lowercase__ ) if flip_type == 1: __snake_case : int = cva.flip(lowercase__ ,lowercase__ ) for bbox in img_annos: __snake_case : Union[str, Any] = 1 - bbox[1] new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] ) elif flip_type == 0: __snake_case : Optional[Any] = cva.flip(lowercase__ ,lowercase__ ) for bbox in img_annos: __snake_case : Dict = 1 - bbox[2] new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] ) new_annos_lists.append(lowercase__ ) new_imgs_list.append(lowercase__ ) return new_imgs_list, new_annos_lists, path_list def a_ ( _UpperCAmelCase : int = 32 ) -> Optional[int]: assert number_char > 1, "The number of character should greater than 1" __snake_case : Dict = ascii_lowercase + digits return "".join(random.choice(lowercase__ ) for _ in range(lowercase__ ) ) if __name__ == "__main__": main() print('''DONE ✅''')
286
from typing import TYPE_CHECKING from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available from ...utils import OptionalDependencyNotAvailable SCREAMING_SNAKE_CASE__ : Any = {"configuration_dpt": ["DPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPTConfig"]} try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : List[str] = ["DPTFeatureExtractor"] SCREAMING_SNAKE_CASE__ : Tuple = ["DPTImageProcessor"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[Any] = [ "DPT_PRETRAINED_MODEL_ARCHIVE_LIST", "DPTForDepthEstimation", "DPTForSemanticSegmentation", "DPTModel", "DPTPreTrainedModel", ] if TYPE_CHECKING: from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_dpt import DPTFeatureExtractor from .image_processing_dpt import DPTImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_dpt import ( DPT_PRETRAINED_MODEL_ARCHIVE_LIST, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel, DPTPreTrainedModel, ) else: import sys SCREAMING_SNAKE_CASE__ : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
0
'''simple docstring''' from __future__ import annotations import requests def UpperCamelCase_ ( A__ : str ): '''simple docstring''' lowerCAmelCase_ : Optional[Any] = f'https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty' return requests.get(lowercase__ ).json() def UpperCamelCase_ ( A__ : int = 10 ): '''simple docstring''' lowerCAmelCase_ : Dict = 'https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty' lowerCAmelCase_ : Optional[Any] = requests.get(lowercase__ ).json()[:max_stories] return [get_hackernews_story(lowercase__ ) for story_id in story_ids] def UpperCamelCase_ ( A__ : int = 10 ): '''simple docstring''' lowerCAmelCase_ : Optional[Any] = hackernews_top_stories(lowercase__ ) return "\n".join("""* [{title}]({url})""".format(**lowercase__ ) for story in stories ) if __name__ == "__main__": print(hackernews_top_stories_as_markdown())
275
from typing import Optional, Union import numpy as np from ...image_processing_utils import BaseImageProcessor, BatchFeature from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images from ...utils import TensorType, logging SCREAMING_SNAKE_CASE__ : List[Any] = logging.get_logger(__name__) class snake_case ( UpperCamelCase_ ): lowercase_ = ['pixel_values'] def __init__( self : List[Any] , a_ : bool = True , a_ : Union[int, float] = 1 / 255 , a_ : bool = True , a_ : int = 8 , **a_ : Union[str, Any] , )-> None: """simple docstring""" super().__init__(**a_ ) SCREAMING_SNAKE_CASE__ : List[str] = do_rescale SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_factor SCREAMING_SNAKE_CASE__ : Dict = do_pad SCREAMING_SNAKE_CASE__ : Any = pad_size def __lowercase( self : str , a_ : np.ndarray , a_ : float , a_ : Optional[Union[str, ChannelDimension]] = None , **a_ : str )-> np.ndarray: """simple docstring""" return rescale(a_ , scale=a_ , data_format=a_ , **a_ ) def __lowercase( self : Any , a_ : np.ndarray , a_ : int , a_ : Optional[Union[str, ChannelDimension]] = None )-> Tuple: """simple docstring""" SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = get_image_size(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = (old_height // size + 1) * size - old_height SCREAMING_SNAKE_CASE__ : List[Any] = (old_width // size + 1) * size - old_width return pad(a_ , ((0, pad_height), (0, pad_width)) , mode='symmetric' , data_format=a_ ) def __lowercase( self : Tuple , a_ : ImageInput , a_ : Optional[bool] = None , a_ : Optional[float] = None , a_ : Optional[bool] = None , a_ : Optional[int] = None , a_ : Optional[Union[str, TensorType]] = None , a_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **a_ : Dict , )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = do_rescale if do_rescale is not None else self.do_rescale SCREAMING_SNAKE_CASE__ : Tuple = rescale_factor if rescale_factor is not None else self.rescale_factor SCREAMING_SNAKE_CASE__ : List[str] = do_pad if do_pad is not None else self.do_pad SCREAMING_SNAKE_CASE__ : List[str] = pad_size if pad_size is not None else self.pad_size SCREAMING_SNAKE_CASE__ : Tuple = make_list_of_images(a_ ) if not valid_images(a_ ): raise ValueError( 'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, ' 'torch.Tensor, tf.Tensor or jax.ndarray.' ) if do_rescale and rescale_factor is None: raise ValueError('Rescale factor must be specified if do_rescale is True.' ) # All transformations expect numpy arrays. SCREAMING_SNAKE_CASE__ : List[str] = [to_numpy_array(a_ ) for image in images] if do_rescale: SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.rescale(image=a_ , scale=a_ ) for image in images] if do_pad: SCREAMING_SNAKE_CASE__ : str = [self.pad(a_ , size=a_ ) for image in images] SCREAMING_SNAKE_CASE__ : List[str] = [to_channel_dimension_format(a_ , a_ ) for image in images] SCREAMING_SNAKE_CASE__ : Tuple = {'pixel_values': images} return BatchFeature(data=a_ , tensor_type=a_ )
85
0
from PIL import Image def _snake_case( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) -> Union[str, Any]: lowercase : str = (259 * (level + 255)) / (255 * (259 - level)) def contrast(SCREAMING_SNAKE_CASE__ ) -> int: return int(128 + factor * (c - 128) ) return img.point(lowercase__ ) if __name__ == "__main__": # Load image with Image.open("""image_data/lena.jpg""") as img: # Change contrast to 170 lowercase : Optional[int] = change_contrast(img, 170) cont_img.save("""image_data/lena_high_contrast.png""", format="""png""")
336
from pathlib import Path import numpy as np from PIL import Image def _a ( lowercase__ : np.ndarray ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2] return 0.2989 * r + 0.5870 * g + 0.1140 * b def _a ( lowercase__ : np.ndarray ): '''simple docstring''' return (gray > 1_27) & (gray <= 2_55) def _a ( lowercase__ : np.ndarray , lowercase__ : np.ndarray ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = np.zeros_like(lowercase__ ) SCREAMING_SNAKE_CASE__ : str = np.zeros( (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1) ) # Copy image to padded image SCREAMING_SNAKE_CASE__ : Optional[Any] = image # Iterate over image & apply kernel for x in range(image.shape[1] ): for y in range(image.shape[0] ): SCREAMING_SNAKE_CASE__ : List[Any] = ( kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]] ).sum() SCREAMING_SNAKE_CASE__ : List[str] = int(summation > 0 ) return output if __name__ == "__main__": # read original image SCREAMING_SNAKE_CASE__ : int = Path(__file__).resolve().parent / "image_data" / "lena.jpg" SCREAMING_SNAKE_CASE__ : int = np.array(Image.open(lena_path)) # kernel to be applied SCREAMING_SNAKE_CASE__ : str = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]]) SCREAMING_SNAKE_CASE__ : Optional[int] = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element) # Save the output image SCREAMING_SNAKE_CASE__ : Optional[int] = Image.fromarray(output).convert("RGB") pil_img.save("result_dilation.png")
85
0
"""simple docstring""" from __future__ import annotations import os from collections.abc import Mapping _A = tuple[int, int] class __UpperCAmelCase : """simple docstring""" def __init__( self : Tuple , A_ : set[int] , A_ : Mapping[EdgeT, int] )-> None: __UpperCamelCase = vertices __UpperCamelCase = { (min(a_ ), max(a_ )): weight for edge, weight in edges.items() } def A ( self : Optional[int] , A_ : EdgeT , A_ : int )-> None: self.vertices.add(edge[0] ) self.vertices.add(edge[1] ) __UpperCamelCase = weight def A ( self : Optional[Any] )-> Graph: __UpperCamelCase = Graph({min(self.vertices )} , {} ) __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 42 while len(subgraph.vertices ) < len(self.vertices ): __UpperCamelCase = max(self.edges.values() ) + 1 for edge, weight in self.edges.items(): if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices): if weight < min_weight: __UpperCamelCase = edge __UpperCamelCase = weight subgraph.add_edge(a_ , a_ ) return subgraph def lowercase (_snake_case = "p107_network.txt" ) -> Union[str, Any]: '''simple docstring''' __UpperCamelCase = os.path.abspath(os.path.dirname(lowercase__ ) ) __UpperCamelCase = os.path.join(lowercase__ ,lowercase__ ) __UpperCamelCase = {} __UpperCamelCase = 42 __UpperCamelCase = 42 __UpperCamelCase = 42 with open(lowercase__ ) as f: __UpperCamelCase = f.read().strip().split("\n" ) __UpperCamelCase = [line.split("," ) for line in data] for edgea in range(1 ,len(lowercase__ ) ): for edgea in range(lowercase__ ): if adjaceny_matrix[edgea][edgea] != "-": __UpperCamelCase = int(adjaceny_matrix[edgea][edgea] ) __UpperCamelCase = Graph(set(range(len(lowercase__ ) ) ) ,lowercase__ ) __UpperCamelCase = graph.prims_algorithm() __UpperCamelCase = sum(graph.edges.values() ) __UpperCamelCase = sum(subgraph.edges.values() ) return initial_total - optimal_total if __name__ == "__main__": print(f"""{solution() = }""")
505
def _a ( lowercase__ : int = 60_08_51_47_51_43 ): '''simple docstring''' try: SCREAMING_SNAKE_CASE__ : Dict = int(lowercase__ ) except (TypeError, ValueError): raise TypeError('Parameter n must be int or castable to int.' ) if n <= 0: raise ValueError('Parameter n must be greater than or equal to one.' ) SCREAMING_SNAKE_CASE__ : int = 2 SCREAMING_SNAKE_CASE__ : int = 0 if n == 2: return 2 while n > 2: while n % i != 0: i += 1 SCREAMING_SNAKE_CASE__ : str = i while n % i == 0: SCREAMING_SNAKE_CASE__ : List[Any] = n // i i += 1 return int(lowercase__ ) if __name__ == "__main__": print(F"""{solution() = }""")
85
0
from __future__ import annotations def _SCREAMING_SNAKE_CASE ( lowerCAmelCase__ ): for i in range(1 ,len(matrix[0] ) ): matrix[0][i] += matrix[0][i - 1] # preprocessing the first column for i in range(1 ,len(lowercase__ ) ): matrix[i][0] += matrix[i - 1][0] # updating the path cost for current position for i in range(1 ,len(lowercase__ ) ): for j in range(1 ,len(matrix[0] ) ): matrix[i][j] += min(matrix[i - 1][j] ,matrix[i][j - 1] ) return matrix[-1][-1] if __name__ == "__main__": import doctest doctest.testmod()
364
def _a ( lowercase__ : int ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = int(lowercase__ ) if n_element < 1: SCREAMING_SNAKE_CASE__ : Tuple = ValueError('a should be a positive number' ) raise my_error SCREAMING_SNAKE_CASE__ : Any = [1] SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = (0, 0, 0) SCREAMING_SNAKE_CASE__ : Any = 1 while index < n_element: while hamming_list[i] * 2 <= hamming_list[-1]: i += 1 while hamming_list[j] * 3 <= hamming_list[-1]: j += 1 while hamming_list[k] * 5 <= hamming_list[-1]: k += 1 hamming_list.append( min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) ) index += 1 return hamming_list if __name__ == "__main__": SCREAMING_SNAKE_CASE__ : Any = input("Enter the last number (nth term) of the Hamming Number Series: ") print("Formula of Hamming Number Series => 2^i * 3^j * 5^k") SCREAMING_SNAKE_CASE__ : int = hamming(int(n)) print("-----------------------------------------------------") print(F"""The list with nth numbers is: {hamming_numbers}""") print("-----------------------------------------------------")
85
0
'''simple docstring''' import os import unittest from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer from ...test_tokenization_common import TokenizerTesterMixin class _a (UpperCamelCase_, unittest.TestCase ): '''simple docstring''' lowerCAmelCase_ : str = PhobertTokenizer lowerCAmelCase_ : Optional[int] = False def snake_case_ ( self ) -> Optional[int]: super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt snake_case : Tuple = ['T@@', 'i', 'I', 'R@@', 'r', 'e@@'] snake_case : str = dict(zip(a_ ,range(len(a_ ) ) ) ) snake_case : Any = ['#version: 0.2', 'l à</w>'] snake_case : Optional[int] = {'unk_token': '<unk>'} snake_case : Any = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] ) snake_case : Dict = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] ) with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp: for token in vocab_tokens: fp.write(F'''{token} {vocab_tokens[token]}\n''' ) with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp: fp.write("""\n""".join(a_ ) ) def snake_case_ ( self ,**__a ) -> str: kwargs.update(self.special_tokens_map ) return PhobertTokenizer.from_pretrained(self.tmpdirname ,**a_ ) def snake_case_ ( self ,__a ) -> Optional[int]: snake_case : Tuple = 'Tôi là VinAI Research' snake_case : List[str] = 'T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>' return input_text, output_text def snake_case_ ( self ) -> Optional[int]: snake_case : Optional[int] = PhobertTokenizer(self.vocab_file ,self.merges_file ,**self.special_tokens_map ) snake_case : Tuple = 'Tôi là VinAI Research' snake_case : Tuple = 'T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'.split() snake_case : List[str] = tokenizer.tokenize(a_ ) print(a_ ) self.assertListEqual(a_ ,a_ ) snake_case : Dict = tokens + [tokenizer.unk_token] snake_case : List[Any] = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3] self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) ,a_ )
116
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available SCREAMING_SNAKE_CASE__ : Union[str, Any] = { "configuration_nllb_moe": [ "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP", "NllbMoeConfig", ] } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : str = [ "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST", "NllbMoeForConditionalGeneration", "NllbMoeModel", "NllbMoePreTrainedModel", "NllbMoeTop2Router", "NllbMoeSparseMLP", ] if TYPE_CHECKING: from .configuration_nllb_moe import ( NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP, NllbMoeConfig, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_nllb_moe import ( NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST, NllbMoeForConditionalGeneration, NllbMoeModel, NllbMoePreTrainedModel, NllbMoeSparseMLP, NllbMoeTopaRouter, ) else: import sys SCREAMING_SNAKE_CASE__ : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
0
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) __magic_name__ : List[str] = { "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: __magic_name__ : Optional[int] = [ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys __magic_name__ : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
615
from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available, is_speech_available, is_torch_available, ) SCREAMING_SNAKE_CASE__ : List[str] = { "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"], "processing_trocr": ["TrOCRProcessor"], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: SCREAMING_SNAKE_CASE__ : Optional[int] = [ "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST", "TrOCRForCausalLM", "TrOCRPreTrainedModel", ] if TYPE_CHECKING: from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig from .processing_trocr import TrOCRProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel else: import sys SCREAMING_SNAKE_CASE__ : Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
85
0
"""simple docstring""" from typing import List, Optional, Union import numpy as np import tensorflow as tf from .utils import logging a = logging.get_logger(__name__) def _snake_case ( _snake_case : Union[tf.Tensor, np.ndarray] ) -> int: '''simple docstring''' if isinstance(lowercase__ , np.ndarray ): return list(tensor.shape ) _A = tf.shape(lowercase__ ) if tensor.shape == tf.TensorShape(lowercase__ ): return dynamic _A = tensor.shape.as_list() return [dynamic[i] if s is None else s for i, s in enumerate(lowercase__ )] def _snake_case ( _snake_case : tf.Tensor , _snake_case : Optional[int] = None , _snake_case : Optional[str] = None ) -> Tuple: '''simple docstring''' return tf.nn.softmax(logits=logits + 1E-9 , axis=lowercase__ , name=lowercase__ ) def _snake_case ( _snake_case : List[Any] , _snake_case : Optional[int] , _snake_case : Tuple , _snake_case : Optional[Any]=1E-5 , _snake_case : Union[str, Any]=-1 ) -> Optional[Any]: '''simple docstring''' if weight.shape.rank != 1 or bias.shape.rank != 1 or not isinstance(lowercase__ , lowercase__ ): raise NotImplementedError('Only 1D weight and bias tensors are supported for now, with only a single axis.' ) # Get mean and variance on the axis to be normalized _A = tf.nn.moments(lowercase__ , axes=[axis] , keepdims=lowercase__ ) if axis != -1: # Reshape scale and weight to have the same rank as inputs, but with 1 dimensions # on every dimension except axis _A = [1] * inputs.shape.rank _A = shape_list(lowercase__ )[axis] _A = tf.reshape(lowercase__ , lowercase__ ) _A = tf.reshape(lowercase__ , lowercase__ ) # Compute layer normalization using the batch_normalization # function. _A = tf.nn.batch_normalization( lowercase__ , lowercase__ , lowercase__ , offset=lowercase__ , scale=lowercase__ , variance_epsilon=lowercase__ , ) return outputs def _snake_case ( _snake_case : List[Any] , _snake_case : Dict=0 , _snake_case : List[Any]=-1 ) -> Optional[Any]: '''simple docstring''' if end_dim < 0: end_dim += input.shape.rank if start_dim < 0: start_dim += input.shape.rank if start_dim == end_dim: return input _A = tf.shape(lowercase__ ) _A = tf.math.reduce_prod(in_shape[start_dim : end_dim + 1] ) _A = tf.concat([in_shape[:start_dim], [flattened_dim], in_shape[end_dim + 1 :]] , axis=0 ) return tf.reshape(lowercase__ , lowercase__ ) def _snake_case ( _snake_case : tf.Tensor ) -> int: '''simple docstring''' if not isinstance(lowercase__ , tf.Tensor ): _A = tf.convert_to_tensor(lowercase__ ) # Catches stray NumPy inputs if encoder_attention_mask.shape.rank == 3: _A = encoder_attention_mask[:, None, :, :] if encoder_attention_mask.shape.rank == 2: _A = encoder_attention_mask[:, None, None, :] # T5 has a mask that can compare sequence ids, we can simulate this here with this transposition # Cf. https://github.com/tensorflow/mesh/blob/8d2465e9bc93129b913b5ccc6a59aa97abd96ec6/mesh_tensorflow # /transformer/transformer_layers.py#L270 # encoder_extended_attention_mask = (encoder_extended_attention_mask == # encoder_extended_attention_mask.transpose(-1, -2)) _A = ( tf.cast(1 , encoder_attention_mask.dtype ) - encoder_extended_attention_mask ) * encoder_extended_attention_mask.dtype.min return encoder_extended_attention_mask def _snake_case ( _snake_case : tf.Tensor , _snake_case : int , _snake_case : str = "input_ids" ) -> Any: '''simple docstring''' tf.debugging.assert_less( lowercase__ , tf.cast(lowercase__ , dtype=tensor.dtype ) , message=( F'''The maximum value of {tensor_name} ({tf.math.reduce_max(lowercase__ )}) must be smaller than the embedding ''' F'''layer\'s input dimension ({embed_dim}). The likely cause is some problem at tokenization time.''' ) , ) def _snake_case ( _snake_case : Union[str, Any] , _snake_case : Union[str, Any] , _snake_case : Optional[Any] ) -> int: '''simple docstring''' _A = 6_45_12 # Check that no item in `data` is larger than `HDF5_OBJECT_HEADER_LIMIT` # because in that case even chunking the array would not make the saving # possible. _A = [x for x in data if len(lowercase__ ) > HDF5_OBJECT_HEADER_LIMIT] # Expecting this to never be true. if bad_attributes: raise RuntimeError( 'The following attributes cannot be saved to HDF5 file because ' F'''they are larger than {HDF5_OBJECT_HEADER_LIMIT} ''' F'''bytes: {bad_attributes}''' ) _A = np.asarray(lowercase__ ) _A = 1 _A = np.array_split(lowercase__ , lowercase__ ) # This will never loop forever thanks to the test above. while any(x.nbytes > HDF5_OBJECT_HEADER_LIMIT for x in chunked_data ): num_chunks += 1 _A = np.array_split(lowercase__ , lowercase__ ) if num_chunks > 1: for chunk_id, chunk_data in enumerate(lowercase__ ): _A = chunk_data else: _A = data def _snake_case ( _snake_case : List[Any] , _snake_case : int ) -> Optional[Any]: '''simple docstring''' if name in group.attrs: _A = [n.decode('utf8' ) if hasattr(lowercase__ , 'decode' ) else n for n in group.attrs[name]] else: _A = [] _A = 0 while "%s%d" % (name, chunk_id) in group.attrs: data.extend( [n.decode('utf8' ) if hasattr(lowercase__ , 'decode' ) else n for n in group.attrs['%s%d' % (name, chunk_id)]] ) chunk_id += 1 return data def _snake_case ( _snake_case : Optional[int] ) -> Union[str, Any]: '''simple docstring''' def _expand_single_ad_tensor(_snake_case : Optional[int] ): if isinstance(lowercase__ , tf.Tensor ) and t.shape.rank == 1: return tf.expand_dims(lowercase__ , axis=-1 ) return t return tf.nest.map_structure(_expand_single_ad_tensor , lowercase__ )
7
import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs SCREAMING_SNAKE_CASE__ : int = imread(r"digital_image_processing/image_data/lena_small.jpg") SCREAMING_SNAKE_CASE__ : List[Any] = cvtColor(img, COLOR_BGR2GRAY) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = cn.convert_to_negative(lowercase__ ) # assert negative_img array for at least one True assert negative_img.any() def _a ( ): '''simple docstring''' with Image.open('digital_image_processing/image_data/lena_small.jpg' ) as img: # Work around assertion for response assert str(cc.change_contrast(lowercase__ , 1_10 ) ).startswith( '<PIL.Image.Image image mode=RGB size=100x100 at' ) def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[int] = imread('digital_image_processing/image_data/lena_small.jpg' , 0 ) # assert ambiguous array for all == True assert canny_img.all() SCREAMING_SNAKE_CASE__ : List[str] = canny.canny(lowercase__ ) # assert canny array for at least one True assert canny_array.any() def _a ( ): '''simple docstring''' assert gg.gaussian_filter(lowercase__ , 5 , sigma=0.9 ).all() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Any = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) SCREAMING_SNAKE_CASE__ : Tuple = conv.img_convolve(lowercase__ , lowercase__ ).astype(lowercase__ ) assert res.any() def _a ( ): '''simple docstring''' assert med.median_filter(lowercase__ , 3 ).any() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = sob.sobel_filter(lowercase__ ) assert grad.any() and theta.any() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[str] = sp.make_sepia(lowercase__ , 20 ) assert sepia.all() def _a ( lowercase__ : str = "digital_image_processing/image_data/lena_small.jpg" ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : str = bs.Burkes(imread(lowercase__ , 1 ) , 1_20 ) burkes.process() assert burkes.output_img.any() def _a ( lowercase__ : str = "digital_image_processing/image_data/lena_small.jpg" , ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Optional[Any] = rs.NearestNeighbour(imread(lowercase__ , 1 ) , 4_00 , 2_00 ) nn.process() assert nn.output.any() def _a ( ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : Dict = 'digital_image_processing/image_data/lena.jpg' # Reading the image and converting it to grayscale. SCREAMING_SNAKE_CASE__ : Dict = imread(lowercase__ , 0 ) # Test for get_neighbors_pixel function() return not None SCREAMING_SNAKE_CASE__ : str = 0 SCREAMING_SNAKE_CASE__ : Dict = 0 SCREAMING_SNAKE_CASE__ : Any = image[x_coordinate][y_coordinate] SCREAMING_SNAKE_CASE__ : List[Any] = lbp.get_neighbors_pixel( lowercase__ , lowercase__ , lowercase__ , lowercase__ ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image SCREAMING_SNAKE_CASE__ : Optional[Any] = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): SCREAMING_SNAKE_CASE__ : str = lbp.local_binary_value(lowercase__ , lowercase__ , lowercase__ ) assert lbp_image.any()
85
0
from __future__ import annotations import unittest import numpy as np from transformers import OPTConfig, is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, slow from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel def _a ( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[Any]=None , SCREAMING_SNAKE_CASE__ : Union[str, Any]=None ) -> int: '''simple docstring''' if attention_mask is None: SCREAMING_SNAKE_CASE__ : Optional[int] = tf.cast(tf.math.not_equal(lowercase__ , config.pad_token_id ) , tf.inta ) return {"input_ids": input_ids, "attention_mask": attention_mask} @require_tf class lowerCamelCase : """simple docstring""" UpperCAmelCase_ = OPTConfig UpperCAmelCase_ = {} UpperCAmelCase_ = "gelu" def __init__( self : str, _UpperCAmelCase : Optional[int], _UpperCAmelCase : Union[str, Any]=1_3, _UpperCAmelCase : str=7, _UpperCAmelCase : Dict=True, _UpperCAmelCase : Union[str, Any]=False, _UpperCAmelCase : Dict=9_9, _UpperCAmelCase : List[Any]=1_6, _UpperCAmelCase : Any=2, _UpperCAmelCase : Dict=4, _UpperCAmelCase : int=4, _UpperCAmelCase : Union[str, Any]="gelu", _UpperCAmelCase : Any=0.1, _UpperCAmelCase : Dict=0.1, _UpperCAmelCase : Optional[Any]=2_0, _UpperCAmelCase : str=2, _UpperCAmelCase : str=1, _UpperCAmelCase : str=0, _UpperCAmelCase : Union[str, Any]=1_6, _UpperCAmelCase : str=1_6, ) -> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = parent SCREAMING_SNAKE_CASE__ : Tuple = batch_size SCREAMING_SNAKE_CASE__ : List[str] = seq_length SCREAMING_SNAKE_CASE__ : Any = is_training SCREAMING_SNAKE_CASE__ : Optional[int] = use_labels SCREAMING_SNAKE_CASE__ : List[Any] = vocab_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_size SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers SCREAMING_SNAKE_CASE__ : Dict = num_attention_heads SCREAMING_SNAKE_CASE__ : Dict = intermediate_size SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_dropout_prob SCREAMING_SNAKE_CASE__ : Optional[int] = attention_probs_dropout_prob SCREAMING_SNAKE_CASE__ : int = max_position_embeddings SCREAMING_SNAKE_CASE__ : Union[str, Any] = eos_token_id SCREAMING_SNAKE_CASE__ : List[Any] = pad_token_id SCREAMING_SNAKE_CASE__ : List[Any] = bos_token_id SCREAMING_SNAKE_CASE__ : Any = embed_dim SCREAMING_SNAKE_CASE__ : List[Any] = word_embed_proj_dim SCREAMING_SNAKE_CASE__ : Optional[Any] = False def A_ ( self : Tuple ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size ) SCREAMING_SNAKE_CASE__ : str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 ) SCREAMING_SNAKE_CASE__ : List[Any] = tf.concat([input_ids, eos_tensor], axis=1 ) SCREAMING_SNAKE_CASE__ : Optional[int] = self.config_cls( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, embed_dim=self.embed_dim, word_embed_proj_dim=self.word_embed_proj_dim, is_encoder_decoder=a_, **self.config_updates, ) SCREAMING_SNAKE_CASE__ : List[str] = prepare_opt_inputs_dict(a_, a_ ) return config, inputs_dict def A_ ( self : Optional[int], _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Optional[int] ) -> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = TFOPTModel(config=a_ ) SCREAMING_SNAKE_CASE__ : str = inputs_dict['input_ids'] SCREAMING_SNAKE_CASE__ : Any = input_ids[:1, :] SCREAMING_SNAKE_CASE__ : str = inputs_dict['attention_mask'][:1, :] SCREAMING_SNAKE_CASE__ : Any = 1 # first forward pass SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(a_, attention_mask=a_, use_cache=a_ ) SCREAMING_SNAKE_CASE__ : Any = outputs.to_tuple() # create hypothetical next token and extent to next_input_ids SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor((self.batch_size, 3), config.vocab_size ) SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta ) # append to next input_ids and SCREAMING_SNAKE_CASE__ : List[Any] = tf.concat([input_ids, next_tokens], axis=-1 ) SCREAMING_SNAKE_CASE__ : int = tf.concat([attention_mask, next_attn_mask], axis=-1 ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(a_, attention_mask=a_ )[0] SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(a_, attention_mask=a_, past_key_values=a_ )[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] ) # select random slice SCREAMING_SNAKE_CASE__ : Tuple = int(ids_tensor((1,), output_from_past.shape[-1] ) ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = output_from_no_past[:, -3:, random_slice_idx] SCREAMING_SNAKE_CASE__ : List[str] = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(a_, a_, rtol=1E-3 ) @require_tf class lowerCamelCase (UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): """simple docstring""" UpperCAmelCase_ = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else () UpperCAmelCase_ = (TFOPTForCausalLM,) if is_tf_available() else () UpperCAmelCase_ = ( {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {} ) UpperCAmelCase_ = False UpperCAmelCase_ = False UpperCAmelCase_ = False UpperCAmelCase_ = 10 def A_ ( self : List[str] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Dict = TFOPTModelTester(self ) SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self, config_class=a_ ) def A_ ( self : Tuple ) -> Any: """simple docstring""" self.config_tester.run_common_tests() def A_ ( self : Dict ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*a_ ) def A_ ( self : Optional[Any] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(_UpperCAmelCase : str, _UpperCAmelCase : str ): if hasattr(a_, "weight" ): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. model.build() if hasattr(a_, "weight" ): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 1_0, config.vocab_size + 1_0]: # build the embeddings SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class(config=a_ ) SCREAMING_SNAKE_CASE__ : Tuple = _get_word_embedding_weight(a_, model.get_input_embeddings() ) SCREAMING_SNAKE_CASE__ : Any = _get_word_embedding_weight(a_, model.get_output_embeddings() ) # reshape the embeddings model.resize_token_embeddings(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = _get_word_embedding_weight(a_, model.get_input_embeddings() ) SCREAMING_SNAKE_CASE__ : Tuple = _get_word_embedding_weight(a_, model.get_output_embeddings() ) # check that the resized embeddings size matches the desired size. SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0], a_ ) # check that weights remain the same after resizing SCREAMING_SNAKE_CASE__ : List[str] = True for pa, pa in zip(old_input_embeddings.value(), new_input_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: SCREAMING_SNAKE_CASE__ : int = False self.assertTrue(a_ ) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0], a_ ) SCREAMING_SNAKE_CASE__ : Dict = True for pa, pa in zip(old_output_embeddings.value(), new_output_embeddings.value() ): if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0: SCREAMING_SNAKE_CASE__ : str = False self.assertTrue(a_ ) def _a ( SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]: '''simple docstring''' return tf.constant(lowercase__ , dtype=tf.intaa ) @require_tf class lowerCamelCase (unittest.TestCase ): """simple docstring""" UpperCAmelCase_ = 99 def A_ ( self : List[str] ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = tf.ones((4, 1), dtype=tf.intaa ) * 2 SCREAMING_SNAKE_CASE__ : str = tf.concat([ids_tensor((4, 6), self.vocab_size - 3 ) + 3, eos_column_vector], axis=1 ) SCREAMING_SNAKE_CASE__ : List[str] = input_ids.shape[0] SCREAMING_SNAKE_CASE__ : str = OPTConfig( vocab_size=self.vocab_size, hidden_size=2_4, num_hidden_layers=2, num_attention_heads=2, ffn_dim=3_2, max_position_embeddings=4_8, eos_token_id=2, pad_token_id=1, bos_token_id=0, ) return config, input_ids, batch_size @require_sentencepiece @require_tf class lowerCamelCase (unittest.TestCase ): """simple docstring""" @slow def A_ ( self : Optional[int] ) -> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = TFOPTModel.from_pretrained("facebook/opt-350m" ) SCREAMING_SNAKE_CASE__ : List[Any] = _long_tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] ) SCREAMING_SNAKE_CASE__ : int = tf.not_equal(a_, model.config.pad_token_id ) with tf.GradientTape(): SCREAMING_SNAKE_CASE__ : List[str] = model(input_ids=a_, attention_mask=a_ ).last_hidden_state SCREAMING_SNAKE_CASE__ : List[str] = (1, 1_1, 5_1_2) self.assertEqual(output.shape, a_ ) SCREAMING_SNAKE_CASE__ : List[Any] = tf.constant( [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] ) self.assertTrue(np.allclose(output[:, :3, :3], a_, atol=4E-3 ) ) SCREAMING_SNAKE_CASE__ : Optional[int] = tf.function(a_, jit_compile=a_ ) SCREAMING_SNAKE_CASE__ : str = xla_generate(a_, a_ )[0] self.assertTrue(np.allclose(output[:, :3, :3], a_, atol=4E-2 ) ) @require_tf @slow class lowerCamelCase (unittest.TestCase ): """simple docstring""" def A_ ( self : str ) -> Any: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ : str = 'facebook/opt-350m' def A_ ( self : Union[str, Any] ) -> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = TFOPTForCausalLM.from_pretrained(self.path_model ) SCREAMING_SNAKE_CASE__ : int = GPTaTokenizer.from_pretrained(self.path_model ) SCREAMING_SNAKE_CASE__ : List[Any] = [ 'Today is a beautiful day and I want to', 'In the city of', 'Paris is the capital of France and', 'Computers and mobile phones have taken', ] # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(a_, return_tensors="tf", padding=a_, add_special_tokens=a_ ) SCREAMING_SNAKE_CASE__ : Dict = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask )[0], axis=-1 ) SCREAMING_SNAKE_CASE__ : int = tf.constant( [ [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670], [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822], [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703], [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477], ] ) self.assertTrue(np.allclose(a_, a_, atol=1E-4 ) ) SCREAMING_SNAKE_CASE__ : Dict = tf.function(a_, jit_compile=a_ ) SCREAMING_SNAKE_CASE__ : Dict = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask )[0], axis=-1 ) self.assertTrue(np.allclose(a_, a_, atol=1E-4 ) ) @require_tf @slow class lowerCamelCase (unittest.TestCase ): """simple docstring""" @property def A_ ( self : int ) -> List[Any]: """simple docstring""" return [ "Today is a beautiful day and I want", "In the city of", "Paris is the capital of France and", "Computers and mobile phones have taken", ] def A_ ( self : Union[str, Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = 'facebook/opt-125m' SCREAMING_SNAKE_CASE__ : int = [ 'Today is a beautiful day and I want to', 'In the city of New York, the city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] SCREAMING_SNAKE_CASE__ : Dict = [] SCREAMING_SNAKE_CASE__ : Dict = GPTaTokenizer.from_pretrained(a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFOPTForCausalLM.from_pretrained(a_ ) for prompt in self.prompts: SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(a_, return_tensors="tf" ).input_ids SCREAMING_SNAKE_CASE__ : Dict = model.generate(a_, max_length=1_0 ) SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.batch_decode(a_, skip_special_tokens=a_ ) predicted_outputs += generated_string self.assertListEqual(a_, a_ ) def A_ ( self : Union[str, Any] ) -> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'facebook/opt-350m' SCREAMING_SNAKE_CASE__ : List[str] = GPTaTokenizer.from_pretrained(a_ ) SCREAMING_SNAKE_CASE__ : Any = TFOPTForCausalLM.from_pretrained(a_ ) SCREAMING_SNAKE_CASE__ : List[str] = 'left' # use different length sentences to test batching SCREAMING_SNAKE_CASE__ : Any = [ 'Hello, my dog is a little', 'Today, I', ] SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer(a_, return_tensors="tf", padding=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = inputs['input_ids'] SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.generate(input_ids=a_, attention_mask=inputs["attention_mask"] ) SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(sentences[0], return_tensors="tf" ).input_ids SCREAMING_SNAKE_CASE__ : str = model.generate(input_ids=a_ ) SCREAMING_SNAKE_CASE__ : Tuple = inputs_non_padded.shape[-1] - tf.math.reduce_sum( tf.cast(inputs["attention_mask"][-1], tf.intaa ) ) SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(sentences[1], return_tensors="tf" ).input_ids SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.generate(input_ids=a_, max_length=model.config.max_length - num_paddings ) SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.batch_decode(a_, skip_special_tokens=a_ ) SCREAMING_SNAKE_CASE__ : str = tokenizer.decode(output_non_padded[0], skip_special_tokens=a_ ) SCREAMING_SNAKE_CASE__ : Any = tokenizer.decode(output_padded[0], skip_special_tokens=a_ ) SCREAMING_SNAKE_CASE__ : Dict = [ 'Hello, my dog is a little bit of a dork.\nI\'m a little bit', 'Today, I was in the middle of a conversation with a friend about the', ] self.assertListEqual(a_, a_ ) self.assertListEqual(a_, [non_padded_sentence, padded_sentence] ) def A_ ( self : int ) -> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = 'facebook/opt-350m' SCREAMING_SNAKE_CASE__ : List[Any] = [ 'Today is a beautiful day and I want to', 'In the city of San Francisco, the city', 'Paris is the capital of France and the capital', 'Computers and mobile phones have taken over the', ] SCREAMING_SNAKE_CASE__ : List[Any] = [] SCREAMING_SNAKE_CASE__ : Optional[int] = GPTaTokenizer.from_pretrained(a_ ) SCREAMING_SNAKE_CASE__ : List[str] = TFOPTForCausalLM.from_pretrained(a_ ) for prompt in self.prompts: SCREAMING_SNAKE_CASE__ : int = tokenizer(a_, return_tensors="tf" ).input_ids SCREAMING_SNAKE_CASE__ : int = model.generate(a_, max_length=1_0 ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.batch_decode(a_, skip_special_tokens=a_ ) predicted_outputs += generated_string self.assertListEqual(a_, a_ )
663
import io import json import unittest from parameterized import parameterized from transformers import FSMTForConditionalGeneration, FSMTTokenizer from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device from utils import calculate_bleu SCREAMING_SNAKE_CASE__ : Any = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json" with io.open(filename, "r", encoding="utf-8") as f: SCREAMING_SNAKE_CASE__ : Tuple = json.load(f) @require_torch class snake_case ( unittest.TestCase ): def __lowercase( self : List[str] , a_ : Any )-> str: """simple docstring""" return FSMTTokenizer.from_pretrained(a_ ) def __lowercase( self : int , a_ : Union[str, Any] )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[Any] = FSMTForConditionalGeneration.from_pretrained(a_ ).to(a_ ) if torch_device == "cuda": model.half() return model @parameterized.expand( [ ['en-ru', 26.0], ['ru-en', 22.0], ['en-de', 22.0], ['de-en', 29.0], ] ) @slow def __lowercase( self : int , a_ : Optional[int] , a_ : str )-> List[str]: """simple docstring""" # note: this test is not testing the best performance since it only evals a small batch # but it should be enough to detect a regression in the output quality SCREAMING_SNAKE_CASE__ : Any = F'''facebook/wmt19-{pair}''' SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_tokenizer(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_model(a_ ) SCREAMING_SNAKE_CASE__ : int = bleu_data[pair]['src'] SCREAMING_SNAKE_CASE__ : Optional[int] = bleu_data[pair]['tgt'] SCREAMING_SNAKE_CASE__ : Any = tokenizer(a_ , return_tensors='pt' , truncation=a_ , padding='longest' ).to(a_ ) SCREAMING_SNAKE_CASE__ : int = model.generate( input_ids=batch.input_ids , num_beams=8 , ) SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.batch_decode( a_ , skip_special_tokens=a_ , clean_up_tokenization_spaces=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = calculate_bleu(a_ , a_ ) print(a_ ) self.assertGreaterEqual(scores['bleu'] , a_ )
85
0
"""simple docstring""" # This is the module that test_patching.py uses to test patch_submodule() import os # noqa: this is just for tests import os as renamed_os # noqa: this is just for tests from os import path # noqa: this is just for tests from os import path as renamed_path # noqa: this is just for tests from os.path import join # noqa: this is just for tests from os.path import join as renamed_join # noqa: this is just for tests _SCREAMING_SNAKE_CASE : Dict = open # noqa: we just need to have a builtin inside this module to test it properly
549
import os import pytest from attr import dataclass SCREAMING_SNAKE_CASE__ : int = "us-east-1" # defaults region @dataclass class snake_case : lowercase_ = 42 lowercase_ = 'arn:aws:iam::558105141721:role/sagemaker_execution_role' lowercase_ = { 'task_name': 'mnli', 'per_device_train_batch_size': 16, 'per_device_eval_batch_size': 16, 'do_train': True, 'do_eval': True, 'do_predict': True, 'output_dir': '/opt/ml/model', 'overwrite_output_dir': True, 'max_steps': 500, 'save_steps': 5_500, } lowercase_ = {**hyperparameters, 'max_steps': 1_000} @property def __lowercase( self : List[str] )-> str: """simple docstring""" if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def __lowercase( self : Union[str, Any] )-> str: """simple docstring""" return F'''{self.framework}-transfromers-test''' @property def __lowercase( self : int )-> str: """simple docstring""" return F'''./tests/sagemaker/scripts/{self.framework}''' @property def __lowercase( self : Tuple )-> str: """simple docstring""" if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope='class' ) def _a ( lowercase__ : Dict ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : List[Any] = SageMakerTestEnvironment(framework=request.cls.framework )
85
0
"""simple docstring""" import contextlib from multiprocessing import Pool, RLock from tqdm.auto import tqdm from ..utils import experimental, logging lowerCamelCase_ = logging.get_logger(__name__) class _SCREAMING_SNAKE_CASE: SCREAMING_SNAKE_CASE_ : str = None @experimental def __lowerCamelCase ( a_ : Any , a_ : Optional[int] , a_ : Union[str, Any] , a_ : Tuple , a_ : Optional[Any] , a_ : List[Any] , a_ : str ) -> Dict: if ParallelBackendConfig.backend_name is None: return _map_with_multiprocessing_pool( lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) return _map_with_joblib(lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ , lowercase__ ) def __lowerCamelCase ( a_ : Any , a_ : Dict , a_ : str , a_ : Optional[Any] , a_ : Optional[Any] , a_ : List[Any] , a_ : Dict ) -> Tuple: __SCREAMING_SNAKE_CASE :Optional[Any] = num_proc if num_proc <= len(lowercase__ ) else len(lowercase__ ) __SCREAMING_SNAKE_CASE :Dict = [] # We organize the splits ourselve (contiguous splits) for index in range(lowercase__ ): __SCREAMING_SNAKE_CASE :List[Any] = len(lowercase__ ) // num_proc __SCREAMING_SNAKE_CASE :Dict = len(lowercase__ ) % num_proc __SCREAMING_SNAKE_CASE :List[str] = div * index + min(lowercase__ , lowercase__ ) __SCREAMING_SNAKE_CASE :Union[str, Any] = start + div + (1 if index < mod else 0) split_kwds.append((function, iterable[start:end], types, index, disable_tqdm, desc) ) if len(lowercase__ ) != sum(len(i[1] ) for i in split_kwds ): raise ValueError( f'''Error dividing inputs iterable among processes. ''' f'''Total number of objects {len(lowercase__ )}, ''' f'''length: {sum(len(i[1] ) for i in split_kwds )}''' ) logger.info( f'''Spawning {num_proc} processes for {len(lowercase__ )} objects in slices of {[len(i[1] ) for i in split_kwds]}''' ) __SCREAMING_SNAKE_CASE :str = None, None if not disable_tqdm: __SCREAMING_SNAKE_CASE :Dict = (RLock(),), tqdm.set_lock with Pool(lowercase__ , initargs=lowercase__ , initializer=lowercase__ ) as pool: __SCREAMING_SNAKE_CASE :Union[str, Any] = pool.map(lowercase__ , lowercase__ ) logger.info(f'''Finished {num_proc} processes''' ) __SCREAMING_SNAKE_CASE :Tuple = [obj for proc_res in mapped for obj in proc_res] logger.info(f'''Unpacked {len(lowercase__ )} objects''' ) return mapped def __lowerCamelCase ( a_ : List[str] , a_ : str , a_ : List[str] , a_ : List[Any] , a_ : Union[str, Any] , a_ : List[str] , a_ : Optional[Any] ) -> Optional[int]: import joblib with joblib.parallel_backend(ParallelBackendConfig.backend_name , n_jobs=lowercase__ ): return joblib.Parallel()( joblib.delayed(lowercase__ )((function, obj, types, None, True, None) ) for obj in iterable ) @experimental @contextlib.contextmanager def __lowerCamelCase ( a_ : str ) -> Union[str, Any]: __SCREAMING_SNAKE_CASE :List[Any] = backend_name if backend_name == "spark": from joblibspark import register_spark register_spark() # TODO: call create_cache_and_write_probe if "download" in steps # TODO: raise NotImplementedError when Dataset.map etc is called try: yield finally: __SCREAMING_SNAKE_CASE :Tuple = None
498
import os import unittest from transformers import FunnelTokenizer, FunnelTokenizerFast from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from ...test_tokenization_common import TokenizerTesterMixin @require_tokenizers class snake_case ( UpperCamelCase_ , unittest.TestCase ): lowercase_ = FunnelTokenizer lowercase_ = FunnelTokenizerFast lowercase_ = True lowercase_ = True def __lowercase( self : Union[str, Any] )-> Tuple: """simple docstring""" super().setUp() SCREAMING_SNAKE_CASE__ : str = [ '<unk>', '<cls>', '<sep>', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing', ',', 'low', 'lowest', ] SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] ) with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer: vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) ) def __lowercase( self : Any , **a_ : Any )-> List[str]: """simple docstring""" return FunnelTokenizer.from_pretrained(self.tmpdirname , **a_ ) def __lowercase( self : Tuple , **a_ : List[Any] )-> List[Any]: """simple docstring""" return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **a_ ) def __lowercase( self : Optional[Any] , a_ : List[str] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'UNwant\u00E9d,running' SCREAMING_SNAKE_CASE__ : int = 'unwanted, running' return input_text, output_text def __lowercase( self : Optional[Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer_class(self.vocab_file ) SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize('UNwant\u00E9d,running' ) self.assertListEqual(a_ , ['un', '##want', '##ed', ',', 'runn', '##ing'] ) self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , [7, 4, 5, 10, 8, 9] ) def __lowercase( self : List[Any] )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_tokenizers(do_lower_case=a_ ) for tokenizer in tokenizers: SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer('UNwant\u00E9d,running' ) SCREAMING_SNAKE_CASE__ : List[Any] = len(inputs['input_ids'] ) - 1 self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer('UNwant\u00E9d,running' , 'UNwant\u00E9d,running' ) self.assertListEqual(inputs['token_type_ids'] , [2] + [0] * sentence_len + [1] * sentence_len )
85
0
'''simple docstring''' from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging A__ : Dict = logging.get_logger(__name__) A__ : Any = { "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json", # See all LeViT models at https://huggingface.co/models?filter=levit } class snake_case__ ( UpperCamelCase_ ): A__ = '''levit''' def __init__( self : str , __a : Optional[Any]=224 , __a : List[str]=3 , __a : Any=3 , __a : Any=2 , __a : Tuple=1 , __a : int=16 , __a : Optional[int]=[128, 256, 384] , __a : Dict=[4, 8, 12] , __a : List[str]=[4, 4, 4] , __a : Any=[16, 16, 16] , __a : Dict=0 , __a : Tuple=[2, 2, 2] , __a : Union[str, Any]=[2, 2, 2] , __a : Optional[Any]=0.0_2 , **__a : str , ) -> Any: '''simple docstring''' super().__init__(**a_ ) __snake_case : Any = image_size __snake_case : List[Any] = num_channels __snake_case : Any = kernel_size __snake_case : Union[str, Any] = stride __snake_case : Any = padding __snake_case : Any = hidden_sizes __snake_case : List[Any] = num_attention_heads __snake_case : Optional[Any] = depths __snake_case : List[str] = key_dim __snake_case : int = drop_path_rate __snake_case : List[str] = patch_size __snake_case : List[str] = attention_ratio __snake_case : Tuple = mlp_ratio __snake_case : str = initializer_range __snake_case : List[Any] = [ ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class snake_case__ ( UpperCamelCase_ ): A__ = version.parse('''1.11''' ) @property def A_ ( self : str ) -> Mapping[str, Mapping[int, str]]: '''simple docstring''' return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def A_ ( self : Any ) -> float: '''simple docstring''' return 1e-4
286
from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging SCREAMING_SNAKE_CASE__ : Dict = logging.get_logger(__name__) SCREAMING_SNAKE_CASE__ : Any = { "facebook/levit-128S": "https://huggingface.co/facebook/levit-128S/resolve/main/config.json", # See all LeViT models at https://huggingface.co/models?filter=levit } class snake_case ( UpperCamelCase_ ): lowercase_ = 'levit' def __init__( self : str , a_ : Optional[Any]=224 , a_ : List[str]=3 , a_ : Any=3 , a_ : Any=2 , a_ : Tuple=1 , a_ : int=16 , a_ : Optional[int]=[128, 256, 384] , a_ : Dict=[4, 8, 12] , a_ : List[str]=[4, 4, 4] , a_ : Any=[16, 16, 16] , a_ : Dict=0 , a_ : Tuple=[2, 2, 2] , a_ : Union[str, Any]=[2, 2, 2] , a_ : Optional[Any]=0.02 , **a_ : str , )-> Any: """simple docstring""" super().__init__(**a_ ) SCREAMING_SNAKE_CASE__ : Any = image_size SCREAMING_SNAKE_CASE__ : List[Any] = num_channels SCREAMING_SNAKE_CASE__ : Any = kernel_size SCREAMING_SNAKE_CASE__ : Union[str, Any] = stride SCREAMING_SNAKE_CASE__ : Any = padding SCREAMING_SNAKE_CASE__ : Any = hidden_sizes SCREAMING_SNAKE_CASE__ : List[Any] = num_attention_heads SCREAMING_SNAKE_CASE__ : Optional[Any] = depths SCREAMING_SNAKE_CASE__ : List[str] = key_dim SCREAMING_SNAKE_CASE__ : int = drop_path_rate SCREAMING_SNAKE_CASE__ : List[str] = patch_size SCREAMING_SNAKE_CASE__ : List[str] = attention_ratio SCREAMING_SNAKE_CASE__ : Tuple = mlp_ratio SCREAMING_SNAKE_CASE__ : str = initializer_range SCREAMING_SNAKE_CASE__ : List[Any] = [ ['Subsample', key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2], ['Subsample', key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2], ] class snake_case ( UpperCamelCase_ ): lowercase_ = version.parse('1.11' ) @property def __lowercase( self : str )-> Mapping[str, Mapping[int, str]]: """simple docstring""" return OrderedDict( [ ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}), ] ) @property def __lowercase( self : Any )-> float: """simple docstring""" return 1e-4
85
0
'''simple docstring''' import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset __A : Optional[Any] = random.Random() def UpperCamelCase_ ( A__ : int , A__ : str=1.0 , A__ : Optional[int]=None , A__ : Any=None ): '''simple docstring''' if rng is None: lowerCAmelCase_ : Tuple = global_rng lowerCAmelCase_ : List[Any] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class __snake_case ( unittest.TestCase): """simple docstring""" def __init__( self : Dict , lowerCamelCase : int , lowerCamelCase : List[str]=7 , lowerCamelCase : Dict=4_00 , lowerCamelCase : List[str]=20_00 , lowerCamelCase : Any=20_48 , lowerCamelCase : Optional[Any]=1_28 , lowerCamelCase : List[str]=1 , lowerCamelCase : Any=5_12 , lowerCamelCase : List[Any]=30 , lowerCamelCase : List[str]=4_41_00 , ) -> Optional[Any]: lowerCAmelCase_ : int = parent lowerCAmelCase_ : Optional[int] = batch_size lowerCAmelCase_ : Any = min_seq_length lowerCAmelCase_ : Tuple = max_seq_length lowerCAmelCase_ : str = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) lowerCAmelCase_ : Optional[int] = spectrogram_length lowerCAmelCase_ : Optional[Any] = feature_size lowerCAmelCase_ : List[Any] = num_audio_channels lowerCAmelCase_ : str = hop_length lowerCAmelCase_ : Any = chunk_length lowerCAmelCase_ : str = sampling_rate def __lowercase ( self : Optional[Any] ) -> Optional[Any]: return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def __lowercase ( self : Optional[int] , lowerCamelCase : List[Any]=False , lowerCamelCase : Optional[int]=False ) -> List[Any]: def _flatten(lowerCamelCase : Optional[Any] ): return list(itertools.chain(*a_ ) ) if equal_length: lowerCAmelCase_ : Union[str, Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size lowerCAmelCase_ : Optional[int] = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: lowerCAmelCase_ : Dict = [np.asarray(a_ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class __snake_case ( UpperCamelCase_ ,unittest.TestCase): """simple docstring""" lowercase = TvltFeatureExtractor def __lowercase ( self : Tuple ) -> List[str]: lowerCAmelCase_ : List[Any] = TvltFeatureExtractionTester(self ) def __lowercase ( self : List[Any] ) -> Optional[Any]: lowerCAmelCase_ : List[Any] = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(a_ , """spectrogram_length""" ) ) self.assertTrue(hasattr(a_ , """feature_size""" ) ) self.assertTrue(hasattr(a_ , """num_audio_channels""" ) ) self.assertTrue(hasattr(a_ , """hop_length""" ) ) self.assertTrue(hasattr(a_ , """chunk_length""" ) ) self.assertTrue(hasattr(a_ , """sampling_rate""" ) ) def __lowercase ( self : Optional[int] ) -> Dict: lowerCAmelCase_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase_ : Union[str, Any] = feat_extract_first.save_pretrained(a_ )[0] check_json_file_has_correct_format(a_ ) lowerCAmelCase_ : str = self.feature_extraction_class.from_pretrained(a_ ) lowerCAmelCase_ : List[Any] = feat_extract_first.to_dict() lowerCAmelCase_ : Union[str, Any] = feat_extract_second.to_dict() lowerCAmelCase_ : Union[str, Any] = dict_first.pop("""mel_filters""" ) lowerCAmelCase_ : Any = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(a_ , a_ ) ) self.assertEqual(a_ , a_ ) def __lowercase ( self : Tuple ) -> List[str]: lowerCAmelCase_ : Dict = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: lowerCAmelCase_ : Dict = os.path.join(a_ , """feat_extract.json""" ) feat_extract_first.to_json_file(a_ ) lowerCAmelCase_ : Tuple = self.feature_extraction_class.from_json_file(a_ ) lowerCAmelCase_ : Optional[Any] = feat_extract_first.to_dict() lowerCAmelCase_ : List[str] = feat_extract_second.to_dict() lowerCAmelCase_ : Dict = dict_first.pop("""mel_filters""" ) lowerCAmelCase_ : List[Any] = dict_second.pop("""mel_filters""" ) self.assertTrue(np.allclose(a_ , a_ ) ) self.assertEqual(a_ , a_ ) def __lowercase ( self : int ) -> Union[str, Any]: lowerCAmelCase_ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 lowerCAmelCase_ : List[str] = [floats_list((1, x) )[0] for x in range(8_00 , 14_00 , 2_00 )] lowerCAmelCase_ : Dict = [np.asarray(a_ ) for speech_input in speech_inputs] # Test not batched input lowerCAmelCase_ : Union[str, Any] = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched lowerCAmelCase_ : Dict = feature_extractor(a_ , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking lowerCAmelCase_ : Dict = feature_extractor( a_ , return_tensors="""np""" , sampling_rate=4_41_00 , mask_audio=a_ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. lowerCAmelCase_ : Optional[Any] = [floats_list((1, x) )[0] for x in (8_00, 8_00, 8_00)] lowerCAmelCase_ : int = np.asarray(a_ ) lowerCAmelCase_ : Optional[Any] = feature_extractor(a_ , return_tensors="""np""" , sampling_rate=4_41_00 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def __lowercase ( self : Optional[Any] , lowerCamelCase : Optional[int] ) -> Tuple: lowerCAmelCase_ : str = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" ) # automatic decoding with librispeech lowerCAmelCase_ : str = ds.sort("""id""" ).select(range(a_ ) )[:num_samples]['audio'] return [x["array"] for x in speech_samples] def __lowercase ( self : Optional[int] ) -> int: lowerCAmelCase_ : Optional[Any] = self._load_datasamples(1 ) lowerCAmelCase_ : str = TvltFeatureExtractor() lowerCAmelCase_ : List[str] = feature_extractor(a_ , return_tensors="""pt""" ).audio_values self.assertEquals(audio_values.shape , (1, 1, 1_92, 1_28) ) lowerCAmelCase_ : Tuple = torch.tensor([[-0.3_032, -0.2_708], [-0.4_434, -0.4_007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , a_ , atol=1E-4 ) )
275
import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class snake_case ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): lowercase_ = StableDiffusionInstructPixaPixPipeline lowercase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'} lowercase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS lowercase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS def __lowercase( self : str )-> int: """simple docstring""" torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : List[Any] = UNetaDConditionModel( block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , ) SCREAMING_SNAKE_CASE__ : List[str] = PNDMScheduler(skip_prk_steps=a_ ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[int] = AutoencoderKL( block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , ) torch.manual_seed(0 ) SCREAMING_SNAKE_CASE__ : Optional[int] = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) SCREAMING_SNAKE_CASE__ : int = CLIPTextModel(a_ ) SCREAMING_SNAKE_CASE__ : Dict = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' ) SCREAMING_SNAKE_CASE__ : List[str] = { 'unet': unet, 'scheduler': scheduler, 'vae': vae, 'text_encoder': text_encoder, 'tokenizer': tokenizer, 'safety_checker': None, 'feature_extractor': None, } return components def __lowercase( self : List[Any] , a_ : Tuple , a_ : Optional[Any]=0 )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ ) SCREAMING_SNAKE_CASE__ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0] SCREAMING_SNAKE_CASE__ : List[Any] = Image.fromarray(np.uinta(a_ ) ).convert('RGB' ) if str(a_ ).startswith('mps' ): SCREAMING_SNAKE_CASE__ : str = torch.manual_seed(a_ ) else: SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Generator(device=a_ ).manual_seed(a_ ) SCREAMING_SNAKE_CASE__ : Dict = { 'prompt': 'A painting of a squirrel eating a burger', 'image': image, 'generator': generator, 'num_inference_steps': 2, 'guidance_scale': 6.0, 'image_guidance_scale': 1, 'output_type': 'numpy', } return inputs def __lowercase( self : str )-> Optional[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Tuple = self.get_dummy_inputs(a_ ) SCREAMING_SNAKE_CASE__ : int = sd_pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : Dict = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE__ : Dict = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase( self : Optional[Any] )-> int: """simple docstring""" SCREAMING_SNAKE_CASE__ : Optional[int] = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE__ : Dict = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_inputs(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = 'french fries' SCREAMING_SNAKE_CASE__ : Optional[Any] = sd_pipe(**a_ , negative_prompt=a_ ) SCREAMING_SNAKE_CASE__ : Dict = output.images SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1] assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase( self : List[Any] )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : int = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs(a_ ) SCREAMING_SNAKE_CASE__ : Optional[Any] = [inputs['prompt']] * 2 SCREAMING_SNAKE_CASE__ : List[str] = np.array(inputs['image'] ).astype(np.floataa ) / 255.0 SCREAMING_SNAKE_CASE__ : Tuple = torch.from_numpy(a_ ).unsqueeze(0 ).to(a_ ) SCREAMING_SNAKE_CASE__ : Dict = image / 2 + 0.5 SCREAMING_SNAKE_CASE__ : Tuple = image.permute(0 , 3 , 1 , 2 ) SCREAMING_SNAKE_CASE__ : int = image.repeat(2 , 1 , 1 , 1 ) SCREAMING_SNAKE_CASE__ : Optional[int] = sd_pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : Any = image[-1, -3:, -3:, -1] assert image.shape == (2, 32, 32, 3) SCREAMING_SNAKE_CASE__ : int = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase( self : List[Any] )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator SCREAMING_SNAKE_CASE__ : str = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : Optional[Any] = EulerAncestralDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' ) SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : Dict = sd_pipe.to(a_ ) sd_pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_dummy_inputs(a_ ) SCREAMING_SNAKE_CASE__ : Tuple = sd_pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : Any = [round(a_ , 4 ) for x in image_slice.flatten().tolist()] print(','.join([str(a_ ) for x in slice] ) ) assert image.shape == (1, 32, 32, 3) SCREAMING_SNAKE_CASE__ : List[Any] = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __lowercase( self : Union[str, Any] )-> Any: """simple docstring""" super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def __lowercase( self : List[Any] )-> Dict: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = self.get_dummy_components() SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline(**a_ ) SCREAMING_SNAKE_CASE__ : int = VaeImageProcessor(do_resize=a_ , do_normalize=a_ ) SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) SCREAMING_SNAKE_CASE__ : Any = pipe(**self.get_dummy_inputs_by_type(a_ , input_image_type='pt' ) )[0] SCREAMING_SNAKE_CASE__ : Optional[int] = components['vae'] SCREAMING_SNAKE_CASE__ : Optional[int] = self.get_dummy_inputs_by_type(a_ , input_image_type='pt' ) for image_param in self.image_latents_params: if image_param in inputs.keys(): SCREAMING_SNAKE_CASE__ : Union[str, Any] = vae.encode(inputs[image_param] ).latent_dist.mode() SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(**a_ )[0] SCREAMING_SNAKE_CASE__ : List[Any] = np.abs(out - out_latents_inputs ).max() self.assertLess(a_ , 1e-4 , 'passing latents as image input generate different result from passing image' ) @slow @require_torch_gpu class snake_case ( unittest.TestCase ): def __lowercase( self : Tuple )-> Dict: """simple docstring""" super().tearDown() gc.collect() torch.cuda.empty_cache() def __lowercase( self : List[Any] , a_ : Dict=0 )-> Any: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = torch.manual_seed(a_ ) SCREAMING_SNAKE_CASE__ : List[str] = load_image( 'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' ) SCREAMING_SNAKE_CASE__ : Tuple = { 'prompt': 'turn him into a cyborg', 'image': image, 'generator': generator, 'num_inference_steps': 3, 'guidance_scale': 7.5, 'image_guidance_scale': 1.0, 'output_type': 'numpy', } return inputs def __lowercase( self : int )-> Optional[int]: """simple docstring""" SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : str = self.get_inputs() SCREAMING_SNAKE_CASE__ : Optional[Any] = pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __lowercase( self : Dict )-> str: """simple docstring""" SCREAMING_SNAKE_CASE__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ ) SCREAMING_SNAKE_CASE__ : str = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs() SCREAMING_SNAKE_CASE__ : Dict = pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : Optional[int] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ : List[Any] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __lowercase( self : Optional[int] )-> Union[str, Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ ) SCREAMING_SNAKE_CASE__ : Dict = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : str = self.get_inputs() SCREAMING_SNAKE_CASE__ : Tuple = pipe(**a_ ).images SCREAMING_SNAKE_CASE__ : List[str] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 512, 512, 3) SCREAMING_SNAKE_CASE__ : List[str] = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __lowercase( self : int )-> List[str]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = 0 def callback_fn(a_ : int , a_ : int , a_ : torch.FloatTensor ) -> None: SCREAMING_SNAKE_CASE__ : Tuple = True nonlocal number_of_steps number_of_steps += 1 if step == 1: SCREAMING_SNAKE_CASE__ : Union[str, Any] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) SCREAMING_SNAKE_CASE__ : List[Any] = latents[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : Optional[int] = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: SCREAMING_SNAKE_CASE__ : Optional[int] = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 64, 64) SCREAMING_SNAKE_CASE__ : Tuple = latents[0, -3:, -3:, -1] SCREAMING_SNAKE_CASE__ : Dict = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 SCREAMING_SNAKE_CASE__ : List[str] = False SCREAMING_SNAKE_CASE__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs() pipe(**a_ , callback=a_ , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def __lowercase( self : int )-> Any: """simple docstring""" torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( 'timbrooks/instruct-pix2pix' , safety_checker=a_ , torch_dtype=torch.floataa ) SCREAMING_SNAKE_CASE__ : Tuple = pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() SCREAMING_SNAKE_CASE__ : Tuple = self.get_inputs() SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipe(**a_ ) SCREAMING_SNAKE_CASE__ : Any = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 10**9 def __lowercase( self : Tuple )-> List[Any]: """simple docstring""" SCREAMING_SNAKE_CASE__ : str = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 SCREAMING_SNAKE_CASE__ : Dict = inputs['image'].resize((504, 504) ) SCREAMING_SNAKE_CASE__ : List[Any] = 'timbrooks/instruct-pix2pix' SCREAMING_SNAKE_CASE__ : str = StableDiffusionInstructPixaPixPipeline.from_pretrained( a_ , safety_checker=a_ , ) pipe.to(a_ ) pipe.set_progress_bar_config(disable=a_ ) pipe.enable_attention_slicing() SCREAMING_SNAKE_CASE__ : Any = pipe(**a_ ) SCREAMING_SNAKE_CASE__ : List[str] = output.images[0] SCREAMING_SNAKE_CASE__ : Any = image[255:258, 383:386, -1] assert image.shape == (504, 504, 3) SCREAMING_SNAKE_CASE__ : str = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
85
0
import inspect import unittest from huggingface_hub import hf_hub_download from transformers import ConvNextConfig, UperNetConfig from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import UperNetForSemanticSegmentation from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class __snake_case : def __init__( self ,snake_case ,snake_case=13 ,snake_case=32 ,snake_case=3 ,snake_case=4 ,snake_case=[10, 20, 30, 40] ,snake_case=[2, 2, 3, 2] ,snake_case=True ,snake_case=True ,snake_case=37 ,snake_case="gelu" ,snake_case=10 ,snake_case=0.02 ,snake_case=["stage2", "stage3", "stage4"] ,snake_case=3 ,snake_case=None ,): '''simple docstring''' lowercase : Dict = parent lowercase : Optional[Any] = batch_size lowercase : List[Any] = image_size lowercase : Tuple = num_channels lowercase : List[Any] = num_stages lowercase : List[Any] = hidden_sizes lowercase : str = depths lowercase : Tuple = is_training lowercase : str = use_labels lowercase : Optional[Any] = intermediate_size lowercase : Tuple = hidden_act lowercase : Optional[Any] = type_sequence_label_size lowercase : Any = initializer_range lowercase : str = out_features lowercase : List[str] = num_labels lowercase : str = scope lowercase : List[Any] = num_stages def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) lowercase : Union[str, Any] = None if self.use_labels: lowercase : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size ) lowercase : Any = self.get_config() return config, pixel_values, labels def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return ConvNextConfig( num_channels=self.num_channels ,num_stages=self.num_stages ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,is_training=self.is_training ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,out_features=self.out_features ,) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return UperNetConfig( backbone_config=self.get_backbone_config() ,hidden_size=512 ,pool_scales=[1, 2, 3, 6] ,use_auxiliary_head=a_ ,auxiliary_loss_weight=0.4 ,auxiliary_in_channels=40 ,auxiliary_channels=256 ,auxiliary_num_convs=1 ,auxiliary_concat_input=a_ ,loss_ignore_index=255 ,num_labels=self.num_labels ,) def _SCREAMING_SNAKE_CASE ( self ,snake_case ,snake_case ,snake_case ): '''simple docstring''' lowercase : Any = UperNetForSemanticSegmentation(config=a_ ) model.to(a_ ) model.eval() lowercase : Union[str, Any] = model(a_ ) self.parent.assertEqual( result.logits.shape ,(self.batch_size, self.num_labels, self.image_size, self.image_size) ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Optional[Any] = self.prepare_config_and_inputs() ( lowercase ) : Union[str, Any] = config_and_inputs lowercase : int = {'pixel_values': pixel_values} return config, inputs_dict @require_torch class __snake_case ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ): _a : Optional[Any]= (UperNetForSemanticSegmentation,) if is_torch_available() else () _a : Any= {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {} _a : List[Any]= False _a : Any= False _a : Optional[int]= False _a : List[Any]= False _a : str= False _a : Optional[int]= False def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Optional[int] = UperNetModelTester(self ) lowercase : Union[str, Any] = ConfigTester(self ,config_class=a_ ,has_text_modality=a_ ,hidden_size=37 ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' return def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : List[str] = model_class(a_ ) lowercase : Dict = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic lowercase : Tuple = [*signature.parameters.keys()] lowercase : int = ['pixel_values'] self.assertListEqual(arg_names[:1] ,a_ ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*a_ ) @unittest.skip(reason="""UperNet does not use inputs_embeds""" ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not support input and output embeddings""" ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not have a base model""" ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' pass @unittest.skip(reason="""UperNet does not have a base model""" ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="""UperNet has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`""" ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' pass @unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' pass def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' def check_hidden_states_output(snake_case ,snake_case ,snake_case ): lowercase : Union[str, Any] = model_class(a_ ) model.to(a_ ) model.eval() with torch.no_grad(): lowercase : int = model(**self._prepare_for_class(a_ ,a_ ) ) lowercase : int = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states lowercase : Optional[Any] = self.model_tester.num_stages self.assertEqual(len(a_ ) ,expected_num_stages + 1 ) # ConvNext's feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,) lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: lowercase : Optional[int] = True check_hidden_states_output(a_ ,a_ ,a_ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] lowercase : Union[str, Any] = True check_hidden_states_output(a_ ,a_ ,a_ ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() lowercase : Tuple = _config_zero_init(a_ ) lowercase : List[str] = _config_zero_init(configs_no_init.backbone_config ) for model_class in self.all_model_classes: lowercase : List[str] = model_class(config=a_ ) for name, param in model.named_parameters(): if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f"Parameter {name} of model {model_class} seems not properly initialized" ,) @unittest.skip(reason="""UperNet does not have tied weights""" ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' pass @slow def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: lowercase : List[Any] = UperNetForSemanticSegmentation.from_pretrained(a_ ) self.assertIsNotNone(a_ ) def _snake_case( ) -> Tuple: lowercase : Any = hf_hub_download( repo_id="""hf-internal-testing/fixtures_ade20k""" , repo_type="""dataset""" , filename="""ADE_val_00000001.jpg""" ) lowercase : Any = Image.open(lowercase__ ).convert("""RGB""" ) return image @require_torch @require_vision @slow class __snake_case ( unittest.TestCase ): def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Optional[Any] = AutoImageProcessor.from_pretrained("""openmmlab/upernet-swin-tiny""" ) lowercase : List[str] = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-swin-tiny""" ).to(a_ ) lowercase : List[Any] = prepare_img() lowercase : str = processor(images=a_ ,return_tensors="""pt""" ).to(a_ ) with torch.no_grad(): lowercase : Any = model(**a_ ) lowercase : Optional[int] = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape ,a_ ) lowercase : Any = torch.tensor( [[-7.5_958, -7.5_958, -7.4_302], [-7.5_958, -7.5_958, -7.4_302], [-7.4_797, -7.4_797, -7.3_068]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] ,a_ ,atol=1e-4 ) ) def _SCREAMING_SNAKE_CASE ( self ): '''simple docstring''' lowercase : Any = AutoImageProcessor.from_pretrained("""openmmlab/upernet-convnext-tiny""" ) lowercase : str = UperNetForSemanticSegmentation.from_pretrained("""openmmlab/upernet-convnext-tiny""" ).to(a_ ) lowercase : List[Any] = prepare_img() lowercase : Optional[int] = processor(images=a_ ,return_tensors="""pt""" ).to(a_ ) with torch.no_grad(): lowercase : str = model(**a_ ) lowercase : int = torch.Size((1, model.config.num_labels, 512, 512) ) self.assertEqual(outputs.logits.shape ,a_ ) lowercase : Union[str, Any] = torch.tensor( [[-8.8_110, -8.8_110, -8.6_521], [-8.8_110, -8.8_110, -8.6_521], [-8.7_746, -8.7_746, -8.6_130]] ).to(a_ ) self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3] ,a_ ,atol=1e-4 ) )
336
import math from collections.abc import Callable def _a ( lowercase__ : Callable[[float], float] , lowercase__ : float , lowercase__ : float ): '''simple docstring''' SCREAMING_SNAKE_CASE__ : float = xa SCREAMING_SNAKE_CASE__ : float = xa while True: if x_n == x_na or function(lowercase__ ) == function(lowercase__ ): raise ZeroDivisionError('float division by zero, could not find root' ) SCREAMING_SNAKE_CASE__ : float = x_na - ( function(lowercase__ ) / ((function(lowercase__ ) - function(lowercase__ )) / (x_na - x_n)) ) if abs(x_na - x_na ) < 10**-5: return x_na SCREAMING_SNAKE_CASE__ : Dict = x_na SCREAMING_SNAKE_CASE__ : List[str] = x_na def _a ( lowercase__ : float ): '''simple docstring''' return math.pow(lowercase__ , 3 ) - (2 * x) - 5 if __name__ == "__main__": print(intersection(f, 3, 3.5))
85
0
"""simple docstring""" from collections import OrderedDict from typing import Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices _A = logging.get_logger(__name__) _A = { "microsoft/swin-tiny-patch4-window7-224": ( "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json" ), # See all Swin models at https://huggingface.co/models?filter=swin } class __UpperCAmelCase ( UpperCamelCase_ , UpperCamelCase_ ): """simple docstring""" _snake_case : List[Any] = 'swin' _snake_case : Tuple = { 'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers', } def __init__( self : str , A_ : Dict=2_24 , A_ : Optional[int]=4 , A_ : Optional[Any]=3 , A_ : Optional[int]=96 , A_ : List[str]=[2, 2, 6, 2] , A_ : Union[str, Any]=[3, 6, 12, 24] , A_ : int=7 , A_ : List[Any]=4.0 , A_ : Any=True , A_ : str=0.0 , A_ : str=0.0 , A_ : List[str]=0.1 , A_ : Union[str, Any]="gelu" , A_ : List[str]=False , A_ : List[str]=0.02 , A_ : Union[str, Any]=1e-5 , A_ : int=32 , A_ : Optional[Any]=None , A_ : List[Any]=None , **A_ : Dict , )-> Any: super().__init__(**a_ ) __UpperCamelCase = image_size __UpperCamelCase = patch_size __UpperCamelCase = num_channels __UpperCamelCase = embed_dim __UpperCamelCase = depths __UpperCamelCase = len(a_ ) __UpperCamelCase = num_heads __UpperCamelCase = window_size __UpperCamelCase = mlp_ratio __UpperCamelCase = qkv_bias __UpperCamelCase = hidden_dropout_prob __UpperCamelCase = attention_probs_dropout_prob __UpperCamelCase = drop_path_rate __UpperCamelCase = hidden_act __UpperCamelCase = use_absolute_embeddings __UpperCamelCase = layer_norm_eps __UpperCamelCase = initializer_range __UpperCamelCase = encoder_stride # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel # this indicates the channel dimension after the last stage of the model __UpperCamelCase = int(embed_dim * 2 ** (len(a_ ) - 1) ) __UpperCamelCase = ['stem'] + [f"""stage{idx}""" for idx in range(1 , len(a_ ) + 1 )] __UpperCamelCase = get_aligned_output_features_output_indices( out_features=a_ , out_indices=a_ , stage_names=self.stage_names ) class __UpperCAmelCase ( UpperCamelCase_ ): """simple docstring""" _snake_case : Optional[Any] = version.parse('1.11' ) @property def A ( self : Optional[Any] )-> Mapping[str, Mapping[int, str]]: return OrderedDict( [ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}), ] ) @property def A ( self : List[str] )-> float: return 1e-4
505
from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class snake_case ( UpperCamelCase_ ): lowercase_ = ['image_processor', 'tokenizer'] lowercase_ = 'AutoImageProcessor' lowercase_ = 'AutoTokenizer' def __init__( self : List[Any] , a_ : int , a_ : Union[str, Any] )-> List[Any]: """simple docstring""" super().__init__(a_ , a_ ) SCREAMING_SNAKE_CASE__ : str = self.image_processor def __call__( self : Tuple , a_ : str=None , a_ : List[Any]=None , a_ : Optional[Any]=None , **a_ : Dict )-> Tuple: """simple docstring""" if text is None and images is None: raise ValueError('You have to specify either text or images. Both cannot be none.' ) if text is not None: SCREAMING_SNAKE_CASE__ : Any = self.tokenizer(a_ , return_tensors=a_ , **a_ ) if images is not None: SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processor(a_ , return_tensors=a_ , **a_ ) if text is not None and images is not None: SCREAMING_SNAKE_CASE__ : List[str] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**a_ ) , tensor_type=a_ ) def __lowercase( self : Dict , *a_ : Any , **a_ : Any )-> List[Any]: """simple docstring""" return self.tokenizer.batch_decode(*a_ , **a_ ) def __lowercase( self : Dict , *a_ : Union[str, Any] , **a_ : Optional[int] )-> Dict: """simple docstring""" return self.tokenizer.decode(*a_ , **a_ ) @property def __lowercase( self : Any )-> Any: """simple docstring""" return ["input_ids", "attention_mask", "pixel_values"]
85
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowercase : Any ={ "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : List[str] =["BloomTokenizerFast"] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowercase : List[str] =[ "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST", "BloomForCausalLM", "BloomModel", "BloomPreTrainedModel", "BloomForSequenceClassification", "BloomForTokenClassification", "BloomForQuestionAnswering", ] if TYPE_CHECKING: from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_bloom_fast import BloomTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_bloom import ( BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST, BloomForCausalLM, BloomForQuestionAnswering, BloomForSequenceClassification, BloomForTokenClassification, BloomModel, BloomPreTrainedModel, ) else: import sys _lowercase : Tuple =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
364
import math import numpy as np import qiskit from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute def _a ( lowercase__ : int = 3 ): '''simple docstring''' if isinstance(lowercase__ , lowercase__ ): raise TypeError('number of qubits must be a integer.' ) if number_of_qubits <= 0: raise ValueError('number of qubits must be > 0.' ) if math.floor(lowercase__ ) != number_of_qubits: raise ValueError('number of qubits must be exact integer.' ) if number_of_qubits > 10: raise ValueError('number of qubits too large to simulate(>10).' ) SCREAMING_SNAKE_CASE__ : Tuple = QuantumRegister(lowercase__ , 'qr' ) SCREAMING_SNAKE_CASE__ : int = ClassicalRegister(lowercase__ , 'cr' ) SCREAMING_SNAKE_CASE__ : Tuple = QuantumCircuit(lowercase__ , lowercase__ ) SCREAMING_SNAKE_CASE__ : Tuple = number_of_qubits for i in range(lowercase__ ): quantum_circuit.h(number_of_qubits - i - 1 ) counter -= 1 for j in range(lowercase__ ): quantum_circuit.cp(np.pi / 2 ** (counter - j) , lowercase__ , lowercase__ ) for k in range(number_of_qubits // 2 ): quantum_circuit.swap(lowercase__ , number_of_qubits - k - 1 ) # measure all the qubits quantum_circuit.measure(lowercase__ , lowercase__ ) # simulate with 10000 shots SCREAMING_SNAKE_CASE__ : Optional[int] = Aer.get_backend('qasm_simulator' ) SCREAMING_SNAKE_CASE__ : Tuple = execute(lowercase__ , lowercase__ , shots=1_00_00 ) return job.result().get_counts(lowercase__ ) if __name__ == "__main__": print( F"""Total count for quantum fourier transform state is: \ {quantum_fourier_transform(3)}""" )
85
0