Dataset columns (preview header, reconstructed):

    column                    dtype    range
    code                      string   lengths 86 - 54.5k
    code_codestyle            int64    0 - 371
    style_context             string   lengths 87 - 49.2k
    style_context_codestyle   int64    0 - 349
    label                     int64    0 - 1
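Each row below pairs a Python file (code) with a second file (style_context), each tagged with an integer style id, plus a binary label. A minimal sketch of loading and inspecting a dataset with this schema via the 🤗 datasets library; the repository id "user/code-style-pairs" is a placeholder, not the real dataset name:

    # Minimal loading sketch (assumes a Hugging Face dataset with the schema above;
    # the repo id below is a placeholder, not the actual repository).
    from datasets import load_dataset

    ds = load_dataset("user/code-style-pairs", split="train")
    print(ds.features)  # code, code_codestyle, style_context, style_context_codestyle, label

    row = ds[0]
    # each "code" cell is a full Python source file stored as one string
    print(len(row["code"]), row["code_codestyle"], row["label"])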
"""simple docstring""" import argparse import os import gluonnlp as nlp import mxnet as mx import numpy as np import torch from gluonnlp.base import get_home_dir from gluonnlp.model.bert import BERTEncoder from gluonnlp.model.utils import _load_vocab from gluonnlp.vocab import Vocab from packaging import version from torch import nn from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer from transformers.models.bert.modeling_bert import ( BertIntermediate, BertLayer, BertOutput, BertSelfAttention, BertSelfOutput, ) from transformers.utils import logging if version.parse(nlp.__version__) != version.parse("""0.8.3"""): raise Exception("""requires gluonnlp == 0.8.3""") if version.parse(mx.__version__) != version.parse("""1.5.0"""): raise Exception("""requires mxnet == 1.5.0""") logging.set_verbosity_info() _lowerCAmelCase : str = logging.get_logger(__name__) _lowerCAmelCase : Optional[Any] = """The Nymphenburg Palace is a beautiful palace in Munich!""" def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str )-> List[str]: '''simple docstring''' UpperCAmelCase__ : Optional[int] = { "attention_cell": "multi_head", "num_layers": 4, "units": 1024, "hidden_size": 768, "max_length": 512, "num_heads": 8, "scaled": True, "dropout": 0.1, "use_residual": True, "embed_size": 1024, "embed_dropout": 0.1, "word_embed": None, "layer_norm_eps": 1E-5, "token_type_vocab_size": 2, } UpperCAmelCase__ : Tuple = bort_4_8_768_1024_hparams # Let's construct the original Bort model here # Taken from official BERT implementation, see: # https://github.com/alexa/bort/blob/master/bort/bort.py UpperCAmelCase__ : List[str] = BERTEncoder( attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=snake_case , output_all_encodings=snake_case , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , snake_case ) , ) # Vocab information needs to be fetched first # It's the same as RoBERTa, so RobertaTokenizer can be used later UpperCAmelCase__ : Any = "openwebtext_ccnews_stories_books_cased" # Specify download folder to Gluonnlp's vocab UpperCAmelCase__ : Tuple = os.path.join(get_home_dir() , "models" ) UpperCAmelCase__ : Union[str, Any] = _load_vocab(snake_case , snake_case , snake_case , cls=snake_case ) UpperCAmelCase__ : Union[str, Any] = nlp.model.BERTModel( snake_case , len(snake_case ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=snake_case , use_token_type_embed=snake_case , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=snake_case , use_decoder=snake_case , ) original_bort.load_parameters(snake_case , cast_dtype=snake_case , ignore_extra=snake_case ) UpperCAmelCase__ : Optional[int] = original_bort._collect_params_with_prefix() # Build our config 🤗 UpperCAmelCase__ : Dict = { "architectures": ["BertForMaskedLM"], "attention_probs_dropout_prob": predefined_args["dropout"], "hidden_act": "gelu", "hidden_dropout_prob": predefined_args["dropout"], "hidden_size": predefined_args["embed_size"], "initializer_range": 0.02, "intermediate_size": 
predefined_args["hidden_size"], "layer_norm_eps": predefined_args["layer_norm_eps"], "max_position_embeddings": predefined_args["max_length"], "model_type": "bort", "num_attention_heads": predefined_args["num_heads"], "num_hidden_layers": predefined_args["num_layers"], "pad_token_id": 1, # 2 = BERT, 1 = RoBERTa "type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa "vocab_size": len(snake_case ), } UpperCAmelCase__ : Any = BertConfig.from_dict(snake_case ) UpperCAmelCase__ : Any = BertForMaskedLM(snake_case ) hf_bort_model.eval() # Parameter mapping table (Gluonnlp to Transformers) # * denotes layer index # # | Gluon Parameter | Transformers Parameter # | -------------------------------------------------------------- | ---------------------- # | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias` # | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight` # | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight` # | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight` # | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias` # | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight` # | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias` # | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight` # | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias` # | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight` # | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight` # | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias` # | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight` # | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias` # | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight` # | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias` # | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight` # | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias` # | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight` # Helper function to convert MXNET Arrays to PyTorch def to_torch(snake_case : Dict ) -> nn.Parameter: return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) ) # Check param shapes and map new HF param back def check_and_map_params(snake_case : Optional[int] , snake_case : Tuple ): UpperCAmelCase__ : Dict = hf_param.shape UpperCAmelCase__ : Optional[int] = to_torch(params[gluon_param] ) UpperCAmelCase__ : Optional[int] = gluon_param.shape assert ( shape_hf == shape_gluon ), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers' return gluon_param UpperCAmelCase__ : str = check_and_map_params( hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" ) UpperCAmelCase__ : Dict = check_and_map_params( 
hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" ) UpperCAmelCase__ : Dict = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" ) UpperCAmelCase__ : Tuple = check_and_map_params( hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" ) # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them) UpperCAmelCase__ : Dict = torch.zeros_like( hf_bort_model.bert.embeddings.token_type_embeddings.weight.data ) for i in range(hf_bort_config.num_hidden_layers ): UpperCAmelCase__ : BertLayer = hf_bort_model.bert.encoder.layer[i] # self attention UpperCAmelCase__ : BertSelfAttention = layer.attention.self UpperCAmelCase__ : Optional[int] = check_and_map_params( self_attn.key.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' ) UpperCAmelCase__ : List[str] = check_and_map_params( self_attn.key.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' ) UpperCAmelCase__ : int = check_and_map_params( self_attn.query.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' ) UpperCAmelCase__ : Any = check_and_map_params( self_attn.query.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' ) UpperCAmelCase__ : Dict = check_and_map_params( self_attn.value.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' ) UpperCAmelCase__ : int = check_and_map_params( self_attn.value.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' ) # self attention output UpperCAmelCase__ : BertSelfOutput = layer.attention.output UpperCAmelCase__ : Union[str, Any] = check_and_map_params( self_output.dense.bias , f'encoder.transformer_cells.{i}.proj.bias' ) UpperCAmelCase__ : Optional[int] = check_and_map_params( self_output.dense.weight , f'encoder.transformer_cells.{i}.proj.weight' ) UpperCAmelCase__ : Union[str, Any] = check_and_map_params( self_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.layer_norm.beta' ) UpperCAmelCase__ : List[Any] = check_and_map_params( self_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.layer_norm.gamma' ) # intermediate UpperCAmelCase__ : BertIntermediate = layer.intermediate UpperCAmelCase__ : str = check_and_map_params( intermediate.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_1.bias' ) UpperCAmelCase__ : str = check_and_map_params( intermediate.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_1.weight' ) # output UpperCAmelCase__ : BertOutput = layer.output UpperCAmelCase__ : Dict = check_and_map_params( bert_output.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_2.bias' ) UpperCAmelCase__ : str = check_and_map_params( bert_output.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_2.weight' ) UpperCAmelCase__ : Optional[Any] = check_and_map_params( bert_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.ffn.layer_norm.beta' ) UpperCAmelCase__ : List[str] = check_and_map_params( bert_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' ) # Save space and energy 🎄 hf_bort_model.half() # Compare output of both models UpperCAmelCase__ : Optional[Any] = RobertaTokenizer.from_pretrained("roberta-base" ) UpperCAmelCase__ : Tuple = tokenizer.encode_plus(snake_case )["input_ids"] # Get gluon output UpperCAmelCase__ : str = mx.nd.array([input_ids] ) UpperCAmelCase__ : Dict = original_bort(inputs=snake_case , token_types=[] ) # Get Transformer output (save and reload model 
again) hf_bort_model.save_pretrained(snake_case ) UpperCAmelCase__ : str = BertModel.from_pretrained(snake_case ) hf_bort_model.eval() UpperCAmelCase__ : List[str] = tokenizer.encode_plus(snake_case , return_tensors="pt" ) UpperCAmelCase__ : int = hf_bort_model(**snake_case )[0] UpperCAmelCase__ : Optional[int] = output_gluon[0].asnumpy() UpperCAmelCase__ : Tuple = output_hf[0].detach().numpy() UpperCAmelCase__ : List[str] = np.max(np.abs(hf_layer - gluon_layer ) ).item() UpperCAmelCase__ : List[Any] = np.allclose(snake_case , snake_case , atol=1E-3 ) if success: print("✔️ Both model do output the same tensors" ) else: print("❌ Both model do **NOT** output the same tensors" ) print("Absolute difference is:" , snake_case ) if __name__ == "__main__": _lowerCAmelCase : List[str] = argparse.ArgumentParser() # Required parameters parser.add_argument( """--bort_checkpoint_path""", default=None, type=str, required=True, help="""Path the official Bort params file.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) _lowerCAmelCase : Optional[int] = parser.parse_args() convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
code_codestyle: 298
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : def __init__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict=1_3 , snake_case__ : List[str]=7 , snake_case__ : Union[str, Any]=True , snake_case__ : Tuple=True , snake_case__ : Optional[int]=True , snake_case__ : Any=True , snake_case__ : Any=9_9 , snake_case__ : List[Any]=1_6 , snake_case__ : Any=3_6 , snake_case__ : Union[str, Any]=6 , snake_case__ : Tuple=6 , snake_case__ : List[str]=6 , snake_case__ : List[str]=3_7 , snake_case__ : Dict="gelu" , snake_case__ : int=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : List[str]=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : List[str]=3 , snake_case__ : Any=4 , snake_case__ : int=None , ): '''simple docstring''' UpperCAmelCase__ : Tuple = parent UpperCAmelCase__ : int = batch_size UpperCAmelCase__ : int = seq_length UpperCAmelCase__ : List[str] = is_training UpperCAmelCase__ : Union[str, Any] = use_input_mask UpperCAmelCase__ : Optional[Any] = use_token_type_ids UpperCAmelCase__ : Any = use_labels UpperCAmelCase__ : List[Any] = vocab_size UpperCAmelCase__ : Any = embedding_size UpperCAmelCase__ : List[str] = hidden_size UpperCAmelCase__ : List[Any] = num_hidden_layers UpperCAmelCase__ : int = num_hidden_groups UpperCAmelCase__ : Union[str, Any] = num_attention_heads UpperCAmelCase__ : List[str] = intermediate_size UpperCAmelCase__ : Optional[Any] = hidden_act UpperCAmelCase__ : List[Any] = hidden_dropout_prob UpperCAmelCase__ : Tuple = attention_probs_dropout_prob UpperCAmelCase__ : str = max_position_embeddings UpperCAmelCase__ : Any = type_vocab_size UpperCAmelCase__ : Union[str, Any] = type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = initializer_range UpperCAmelCase__ : Tuple = num_labels UpperCAmelCase__ : List[str] = num_choices UpperCAmelCase__ : Union[str, Any] = scope def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ : Optional[int] = None if self.use_input_mask: UpperCAmelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : Optional[int] = None if self.use_token_type_ids: UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : Any = None if self.use_labels: UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ : int = 
self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self : Any ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def __a ( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = AlbertModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ ) UpperCAmelCase__ : Optional[Any] = model(snake_case__ , token_type_ids=snake_case__ ) UpperCAmelCase__ : Optional[int] = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertForPreTraining(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , sentence_order_label=snake_case__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def __a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AlbertForMaskedLM(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertForQuestionAnswering(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[str] = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self 
: Dict , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.num_labels UpperCAmelCase__ : int = AlbertForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self : str , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : str = self.num_labels UpperCAmelCase__ : Any = AlbertForTokenClassification(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[str] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self : Any , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.num_choices UpperCAmelCase__ : Optional[Any] = AlbertForMultipleChoice(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Tuple = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[Any] = config_and_inputs UpperCAmelCase__ : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( { '''feature-extraction''': AlbertModel, '''fill-mask''': AlbertForMaskedLM, '''question-answering''': AlbertForQuestionAnswering, '''text-classification''': AlbertForSequenceClassification, '''token-classification''': AlbertForTokenClassification, '''zero-shot''': AlbertForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =True def __a ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[int]=False ): '''simple docstring''' UpperCAmelCase__ : List[str] = super()._prepare_for_class(snake_case__ , 
snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class in get_values(snake_case__ ): UpperCAmelCase__ : List[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case__ ) UpperCAmelCase__ : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = AlbertModelTester(self ) UpperCAmelCase__ : Any = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 ) def __a ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case__ ) def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase__ : Dict = type self.model_tester.create_and_check_model(*snake_case__ ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained("albert-base-v2" ) UpperCAmelCase__ : Dict = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) UpperCAmelCase__ : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ )[0] UpperCAmelCase__ : Dict = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , snake_case__ ) UpperCAmelCase__ : Union[str, Any] = torch.tensor( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1e-4 ) )
style_context_codestyle: 298
label: 1
"""simple docstring""" import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def SCREAMING_SNAKE_CASE__ ( snake_case : Dataset , snake_case : Dict[str, str] )-> Any: '''simple docstring''' UpperCAmelCase__ : str = args.log_outputs UpperCAmelCase__ : str = "_".join(args.dataset.split("/" ) + [args.config, args.split] ) # load metric UpperCAmelCase__ : List[str] = load_metric("wer" ) UpperCAmelCase__ : Tuple = load_metric("cer" ) # compute metrics UpperCAmelCase__ : List[str] = wer.compute(references=result["target"] , predictions=result["prediction"] ) UpperCAmelCase__ : Tuple = cer.compute(references=result["target"] , predictions=result["prediction"] ) # print & log results UpperCAmelCase__ : Union[str, Any] = f'WER: {wer_result}\nCER: {cer_result}' print(snake_case ) with open(f'{dataset_id}_eval_results.txt' , "w" ) as f: f.write(snake_case ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: UpperCAmelCase__ : str = f'log_{dataset_id}_predictions.txt' UpperCAmelCase__ : List[str] = f'log_{dataset_id}_targets.txt' with open(snake_case , "w" ) as p, open(snake_case , "w" ) as t: # mapping function to write output def write_to_file(snake_case : List[Any] , snake_case : List[str] ): p.write(f'{i}' + "\n" ) p.write(batch["prediction"] + "\n" ) t.write(f'{i}' + "\n" ) t.write(batch["target"] + "\n" ) result.map(snake_case , with_indices=snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> str: '''simple docstring''' UpperCAmelCase__ : str = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training UpperCAmelCase__ : str = re.sub(snake_case , "" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
UpperCAmelCase__ : Tuple = ["\n\n", "\n", " ", " "] for t in token_sequences_to_ignore: UpperCAmelCase__ : List[Any] = " ".join(text.split(snake_case ) ) return text def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor UpperCAmelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(args.model_id ) UpperCAmelCase__ : str = feature_extractor.sampling_rate # resample audio UpperCAmelCase__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case ) ) # load eval pipeline if args.device is None: UpperCAmelCase__ : List[str] = 0 if torch.cuda.is_available() else -1 UpperCAmelCase__ : Optional[int] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(snake_case : Any ): UpperCAmelCase__ : List[str] = asr( batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) UpperCAmelCase__ : List[Any] = prediction["text"] UpperCAmelCase__ : Optional[int] = normalize_text(batch["sentence"] ) return batch # run inference on all examples UpperCAmelCase__ : Dict = dataset.map(snake_case , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case , snake_case ) if __name__ == "__main__": _lowerCAmelCase : Any = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) _lowerCAmelCase : Tuple = parser.parse_args() main(args)
code_codestyle: 298
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Any )-> Any: '''simple docstring''' UpperCAmelCase__ : List[str] = [1] for i in range(2 , snake_case ): factorials.append(factorials[-1] * i ) assert 0 <= k < factorials[-1] * n, "k out of bounds" UpperCAmelCase__ : Union[str, Any] = [] UpperCAmelCase__ : str = list(range(snake_case ) ) # Find permutation while factorials: UpperCAmelCase__ : str = factorials.pop() UpperCAmelCase__ , UpperCAmelCase__ : int = divmod(snake_case , snake_case ) permutation.append(elements[number] ) elements.remove(elements[number] ) permutation.append(elements[0] ) return permutation if __name__ == "__main__": import doctest doctest.testmod()
style_context_codestyle: 298
label: 1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available, is_vision_available, ) _lowerCAmelCase : str = { """configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""] } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Tuple = ["""ConvNextFeatureExtractor"""] _lowerCAmelCase : List[Any] = ["""ConvNextImageProcessor"""] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : int = [ """CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""", """ConvNextForImageClassification""", """ConvNextModel""", """ConvNextPreTrainedModel""", """ConvNextBackbone""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : str = [ """TFConvNextForImageClassification""", """TFConvNextModel""", """TFConvNextPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_convnext import ConvNextFeatureExtractor from .image_processing_convnext import ConvNextImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_convnext import ( CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvNextBackbone, ConvNextForImageClassification, ConvNextModel, ConvNextPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel else: import sys _lowerCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
code_codestyle: 298
"""simple docstring""" import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: _lowerCAmelCase : Union[str, Any] = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class lowerCAmelCase__ ( unittest.TestCase ): def __init__( self : Dict , snake_case__ : Optional[int] , snake_case__ : List[str]=7 , snake_case__ : int=3 , snake_case__ : Any=1_8 , snake_case__ : List[Any]=3_0 , snake_case__ : int=4_0_0 , snake_case__ : Dict=None , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=None , ): '''simple docstring''' UpperCAmelCase__ : Dict = size if size is not None else {"height": 2_0, "width": 2_0} UpperCAmelCase__ : List[str] = parent UpperCAmelCase__ : List[str] = batch_size UpperCAmelCase__ : Optional[Any] = num_channels UpperCAmelCase__ : Any = image_size UpperCAmelCase__ : int = min_resolution UpperCAmelCase__ : Tuple = max_resolution UpperCAmelCase__ : Optional[int] = size UpperCAmelCase__ : Optional[int] = do_normalize UpperCAmelCase__ : str = do_convert_rgb UpperCAmelCase__ : Dict = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6] UpperCAmelCase__ : Union[str, Any] = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6} def __a ( self : str ): '''simple docstring''' return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg" UpperCAmelCase__ : List[str] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("RGB" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : int = PixaStructImageProcessingTester(self ) @property def __a ( self : Optional[int] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , "do_normalize" ) ) self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.image_processor_tester.prepare_dummy_image() UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict ) UpperCAmelCase__ : Dict = 2_0_4_8 UpperCAmelCase__ : int = image_processor(snake_case__ , return_tensors="pt" , max_patches=snake_case__ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) ) def __a ( self : List[Any] ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : 
Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : List[Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : str = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : List[Any] ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 UpperCAmelCase__ : Optional[int] = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(snake_case__ ): UpperCAmelCase__ : List[Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches UpperCAmelCase__ : Optional[Any] = "Hello" UpperCAmelCase__ : int = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : Dict = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : Dict ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , np.ndarray ) UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : Dict = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : List[str] = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : Optional[int] ): 
'''simple docstring''' # Initialize image_processor UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , torch.Tensor ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : int = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : str = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = PixaStructImageProcessingTester(self , num_channels=4 ) UpperCAmelCase__ : Optional[int] = 3 @property def __a ( self : int ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , "do_normalize" ) ) self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) ) def __a ( self : int ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : str = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : Optional[int] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : Dict = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
style_context_codestyle: 298
label: 1
"""simple docstring""" import enum import os from hashlib import shaaaa from typing import Optional from .. import config from .logging import get_logger _lowerCAmelCase : Optional[int] = get_logger(__name__) class lowerCAmelCase__ ( enum.Enum ): SCREAMING_SNAKE_CASE_ ='''all_checks''' SCREAMING_SNAKE_CASE_ ='''basic_checks''' SCREAMING_SNAKE_CASE_ ='''no_checks''' class lowerCAmelCase__ ( __magic_name__ ): pass class lowerCAmelCase__ ( __magic_name__ ): pass class lowerCAmelCase__ ( __magic_name__ ): pass class lowerCAmelCase__ ( __magic_name__ ): pass def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[dict] , snake_case : dict , snake_case : Optional[Any]=None )-> Any: '''simple docstring''' if expected_checksums is None: logger.info("Unable to verify checksums." ) return if len(set(snake_case ) - set(snake_case ) ) > 0: raise ExpectedMoreDownloadedFiles(str(set(snake_case ) - set(snake_case ) ) ) if len(set(snake_case ) - set(snake_case ) ) > 0: raise UnexpectedDownloadedFile(str(set(snake_case ) - set(snake_case ) ) ) UpperCAmelCase__ : Optional[int] = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]] UpperCAmelCase__ : Optional[int] = " for " + verification_name if verification_name is not None else "" if len(snake_case ) > 0: raise NonMatchingChecksumError( f'Checksums didn\'t match{for_verification_name}:\n' f'{bad_urls}\n' "Set `verification_mode='no_checks'` to skip checksums verification and ignore this error" ) logger.info("All the checksums matched successfully" + for_verification_name ) class lowerCAmelCase__ ( __magic_name__ ): pass class lowerCAmelCase__ ( __magic_name__ ): pass class lowerCAmelCase__ ( __magic_name__ ): pass class lowerCAmelCase__ ( __magic_name__ ): pass def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[dict] , snake_case : dict )-> int: '''simple docstring''' if expected_splits is None: logger.info("Unable to verify splits sizes." ) return if len(set(snake_case ) - set(snake_case ) ) > 0: raise ExpectedMoreSplits(str(set(snake_case ) - set(snake_case ) ) ) if len(set(snake_case ) - set(snake_case ) ) > 0: raise UnexpectedSplits(str(set(snake_case ) - set(snake_case ) ) ) UpperCAmelCase__ : Optional[int] = [ {"expected": expected_splits[name], "recorded": recorded_splits[name]} for name in expected_splits if expected_splits[name].num_examples != recorded_splits[name].num_examples ] if len(snake_case ) > 0: raise NonMatchingSplitsSizesError(str(snake_case ) ) logger.info("All the splits matched successfully." ) def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : bool = True )-> dict: '''simple docstring''' if record_checksum: UpperCAmelCase__ : Optional[Any] = shaaaa() with open(snake_case , "rb" ) as f: for chunk in iter(lambda: f.read(1 << 20 ) , b"" ): m.update(snake_case ) UpperCAmelCase__ : List[str] = m.hexdigest() else: UpperCAmelCase__ : int = None return {"num_bytes": os.path.getsize(snake_case ), "checksum": checksum} def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] )-> Any: '''simple docstring''' if dataset_size and config.IN_MEMORY_MAX_SIZE: return dataset_size < config.IN_MEMORY_MAX_SIZE else: return False
code_codestyle: 298
"""simple docstring""" import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> Any: '''simple docstring''' assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE__ ( )-> List[Any]: '''simple docstring''' assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE__ ( )-> Optional[int]: '''simple docstring''' UpperCAmelCase__ : int = "mock-s3-bucket" UpperCAmelCase__ : Any = f's3://{mock_bucket}' UpperCAmelCase__ : Tuple = extract_path_from_uri(snake_case ) assert dataset_path.startswith("s3://" ) is False UpperCAmelCase__ : str = "./local/path" UpperCAmelCase__ : Union[str, Any] = extract_path_from_uri(snake_case ) assert dataset_path == new_dataset_path def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case ) assert is_remote is True UpperCAmelCase__ : str = fsspec.filesystem("file" ) UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case ) assert is_remote is False @pytest.mark.parametrize("compression_fs_class" , snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Any , snake_case : List[str] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : int )-> int: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file} UpperCAmelCase__ : Dict = input_paths[compression_fs_class.protocol] if input_path is None: UpperCAmelCase__ : Optional[Any] = f'for \'{compression_fs_class.protocol}\' compression protocol, ' if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case ) UpperCAmelCase__ : Optional[Any] = fsspec.filesystem(compression_fs_class.protocol , fo=snake_case ) assert isinstance(snake_case , snake_case ) UpperCAmelCase__ : Union[str, Any] = os.path.basename(snake_case ) UpperCAmelCase__ : Optional[int] = expected_filename[: expected_filename.rindex("." 
)] assert fs.glob("*" ) == [expected_filename] with fs.open(snake_case , "r" , encoding="utf-8" ) as f, open(snake_case , encoding="utf-8" ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("protocol" , ["zip", "gzip"] ) def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : Dict , snake_case : Tuple )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[str] = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path} UpperCAmelCase__ : int = compressed_file_paths[protocol] UpperCAmelCase__ : Any = "dataset.jsonl" UpperCAmelCase__ : Any = f'{protocol}://{member_file_path}::{compressed_file_path}' UpperCAmelCase__ , *UpperCAmelCase__ : Optional[int] = fsspec.get_fs_token_paths(snake_case ) assert fs.isfile(snake_case ) assert not fs.isfile("non_existing_" + member_file_path ) @pytest.mark.integration def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Dict , snake_case : Dict , snake_case : Dict )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = hf_api.dataset_info(snake_case , token=snake_case ) UpperCAmelCase__ : str = HfFileSystem(repo_info=snake_case , token=snake_case ) assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"] assert hffs.isdir("data" ) assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" ) with open(snake_case ) as f: assert hffs.open("data/text_data.txt" , "r" ).read() == f.read() def SCREAMING_SNAKE_CASE__ ( )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ : Tuple = "bz2" # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(snake_case , snake_case , clobber=snake_case ) with pytest.warns(snake_case ) as warning_info: importlib.reload(datasets.filesystems ) assert len(snake_case ) == 1 assert ( str(warning_info[0].message ) == f'A filesystem protocol was already set for {protocol} and will be overwritten.' )
style_context_codestyle: 298
label: 1
"""simple docstring""" import os import unittest from tempfile import TemporaryDirectory import torch import torch.nn as nn from accelerate.utils import ( OffloadedWeightsLoader, extract_submodules_state_dict, load_offloaded_weight, offload_state_dict, offload_weight, ) class lowerCAmelCase__ ( nn.Module ): def __init__( self : int ): '''simple docstring''' super().__init__() UpperCAmelCase__ : int = nn.Linear(3 , 4 ) UpperCAmelCase__ : Dict = nn.BatchNormad(4 ) UpperCAmelCase__ : Union[str, Any] = nn.Linear(4 , 5 ) def __a ( self : Optional[Any] , snake_case__ : Optional[int] ): '''simple docstring''' return self.lineara(self.batchnorm(self.lineara(snake_case__ ) ) ) class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[Any] = ModelForTest() with TemporaryDirectory() as tmp_dir: offload_state_dict(snake_case__ , model.state_dict() ) UpperCAmelCase__ : Optional[int] = os.path.join(snake_case__ , "index.json" ) self.assertTrue(os.path.isfile(snake_case__ ) ) # TODO: add tests on what is inside the index for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]: UpperCAmelCase__ : List[Any] = os.path.join(snake_case__ , f'{key}.dat' ) self.assertTrue(os.path.isfile(snake_case__ ) ) # TODO: add tests on the fact weights are properly loaded def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = [torch.floataa, torch.floataa, torch.bfloataa] for dtype in dtypes: UpperCAmelCase__ : str = torch.randn(2 , 3 , dtype=snake_case__ ) with TemporaryDirectory() as tmp_dir: UpperCAmelCase__ : Optional[int] = offload_weight(snake_case__ , "weight" , snake_case__ , {} ) UpperCAmelCase__ : Dict = os.path.join(snake_case__ , "weight.dat" ) self.assertTrue(os.path.isfile(snake_case__ ) ) self.assertDictEqual(snake_case__ , {"weight": {"shape": [2, 3], "dtype": str(snake_case__ ).split("." 
)[1]}} ) UpperCAmelCase__ : Union[str, Any] = load_offloaded_weight(snake_case__ , index["weight"] ) self.assertTrue(torch.equal(snake_case__ , snake_case__ ) ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = ModelForTest() UpperCAmelCase__ : List[Any] = model.state_dict() UpperCAmelCase__ : List[Any] = {k: v for k, v in state_dict.items() if "linear2" not in k} UpperCAmelCase__ : int = {k: v for k, v in state_dict.items() if "linear2" in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(snake_case__ , snake_case__ ) UpperCAmelCase__ : Tuple = OffloadedWeightsLoader(state_dict=snake_case__ , save_folder=snake_case__ ) # Every key is there with the right value self.assertEqual(sorted(snake_case__ ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(snake_case__ , weight_map[key] ) ) UpperCAmelCase__ : int = {k: v for k, v in state_dict.items() if "weight" in k} UpperCAmelCase__ : Tuple = {k: v for k, v in state_dict.items() if "weight" not in k} with TemporaryDirectory() as tmp_dir: offload_state_dict(snake_case__ , snake_case__ ) UpperCAmelCase__ : Any = OffloadedWeightsLoader(state_dict=snake_case__ , save_folder=snake_case__ ) # Every key is there with the right value self.assertEqual(sorted(snake_case__ ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(snake_case__ , weight_map[key] ) ) with TemporaryDirectory() as tmp_dir: offload_state_dict(snake_case__ , snake_case__ ) # Duplicates are removed UpperCAmelCase__ : Any = OffloadedWeightsLoader(state_dict=snake_case__ , save_folder=snake_case__ ) # Every key is there with the right value self.assertEqual(sorted(snake_case__ ) , sorted(state_dict.keys() ) ) for key, param in state_dict.items(): self.assertTrue(torch.allclose(snake_case__ , weight_map[key] ) ) def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = {"a.1": 0, "a.10": 1, "a.2": 2} UpperCAmelCase__ : int = extract_submodules_state_dict(snake_case__ , ["a.1", "a.2"] ) self.assertDictEqual(snake_case__ , {"a.1": 0, "a.2": 2} ) UpperCAmelCase__ : List[Any] = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2} UpperCAmelCase__ : Any = extract_submodules_state_dict(snake_case__ , ["a.1", "a.2"] ) self.assertDictEqual(snake_case__ , {"a.1.a": 0, "a.2.a": 2} )
code_codestyle: 298
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : List[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[Any] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''megatron-bert''' def __init__( self : Optional[Any] , snake_case__ : Dict=2_9_0_5_6 , snake_case__ : Optional[int]=1_0_2_4 , snake_case__ : int=2_4 , snake_case__ : str=1_6 , snake_case__ : Optional[Any]=4_0_9_6 , snake_case__ : List[str]="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : Tuple=5_1_2 , snake_case__ : str=2 , snake_case__ : List[Any]=0.02 , snake_case__ : Any=1e-12 , snake_case__ : Any=0 , snake_case__ : str="absolute" , snake_case__ : Optional[Any]=True , **snake_case__ : int , ): '''simple docstring''' super().__init__(pad_token_id=snake_case__ , **snake_case__ ) UpperCAmelCase__ : str = vocab_size UpperCAmelCase__ : str = hidden_size UpperCAmelCase__ : List[str] = num_hidden_layers UpperCAmelCase__ : Optional[int] = num_attention_heads UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Tuple = hidden_dropout_prob UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob UpperCAmelCase__ : Any = max_position_embeddings UpperCAmelCase__ : Dict = type_vocab_size UpperCAmelCase__ : Optional[int] = initializer_range UpperCAmelCase__ : int = layer_norm_eps UpperCAmelCase__ : Optional[Any] = position_embedding_type UpperCAmelCase__ : Any = use_cache
298
1
"""simple docstring""" import numpy as np import datasets _lowerCAmelCase : Optional[int] = """ Compute the Mahalanobis Distance Mahalonobis distance is the distance between a point and a distribution. And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance. It was introduced by Prof. P. C. Mahalanobis in 1936 and has been used in various statistical applications ever since [source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/] """ _lowerCAmelCase : Tuple = """\ @article{de2000mahalanobis, title={The mahalanobis distance}, author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L}, journal={Chemometrics and intelligent laboratory systems}, volume={50}, number={1}, pages={1--18}, year={2000}, publisher={Elsevier} } """ _lowerCAmelCase : Optional[int] = """ Args: X: List of datapoints to be compared with the `reference_distribution`. reference_distribution: List of datapoints from the reference distribution we want to compare to. Returns: mahalanobis: The Mahalonobis distance for each datapoint in `X`. Examples: >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\") >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]]) >>> print(results) {'mahalanobis': array([0.5])} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): def __a ( self : Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ), } ) , ) def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any ): '''simple docstring''' # convert to numpy arrays UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError("Expected `X` to be a 2D vector" ) if len(reference_distribution.shape ) != 2: raise ValueError("Expected `reference_distribution` to be a 2D vector" ) if reference_distribution.shape[0] < 2: raise ValueError( "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" ) # Get mahalanobis distance for each prediction UpperCAmelCase__ : Optional[Any] = X - np.mean(snake_case__ ) UpperCAmelCase__ : Tuple = np.cov(reference_distribution.T ) try: UpperCAmelCase__ : str = np.linalg.inv(snake_case__ ) except np.linalg.LinAlgError: UpperCAmelCase__ : Optional[Any] = np.linalg.pinv(snake_case__ ) UpperCAmelCase__ : List[Any] = np.dot(snake_case__ , snake_case__ ) UpperCAmelCase__ : Tuple = np.dot(snake_case__ , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
298
"""simple docstring""" import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class lowerCAmelCase__ ( datasets.BeamBasedBuilder ): def __a ( self : Dict ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=snake_case__ , ) def __a ( self : int , snake_case__ : str , snake_case__ : List[str] ): '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )] def __a ( self : Any , snake_case__ : str , snake_case__ : str ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(snake_case__ ) class lowerCAmelCase__ ( datasets.BeamBasedBuilder ): def __a ( self : Any ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=snake_case__ , ) def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : int ): '''simple docstring''' return [ datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} ) ] def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Any ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Dict: '''simple docstring''' return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )] def SCREAMING_SNAKE_CASE__ ( )-> List[Any]: '''simple docstring''' return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )] class lowerCAmelCase__ ( __magic_name__ ): @require_beam def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : List[Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) ) UpperCAmelCase__ : Tuple = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] ) self.assertDictEqual( dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset @require_beam def __a ( self : Dict ): '''simple docstring''' import apache_beam as beam UpperCAmelCase__ : Dict = beam.io.parquetio.WriteToParquet UpperCAmelCase__ : List[str] = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Union[str, Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock: UpperCAmelCase__ : List[Any] = partial(snake_case__ , num_shards=2 ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( snake_case__ , builder.name , 
"default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) ) self.assertTrue( os.path.exists( os.path.join( snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) ) UpperCAmelCase__ : Dict = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset @require_beam def __a ( self : str ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Optional[Any] = DummyBeamDataset(cache_dir=snake_case__ ) self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare ) @require_beam def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = len(get_test_nested_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : List[Any] = NestedBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) ) self.assertDictEqual( builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) ) UpperCAmelCase__ : Tuple = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] ) self.assertDictEqual( dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset
298
1
"""simple docstring""" import argparse import torch from torch import nn from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ : Optional[int] = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "decoder.output_projection.weight", "_float_tensor", "encoder.embed_positions._float_tensor", "decoder.embed_positions._float_tensor", ] for k in ignore_keys: state_dict.pop(snake_case , snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple )-> Dict: '''simple docstring''' UpperCAmelCase__ : List[Any] = list(s_dict.keys() ) for key in keys: if "transformer_layers" in key: UpperCAmelCase__ : int = s_dict.pop(snake_case ) elif "subsample" in key: UpperCAmelCase__ : int = s_dict.pop(snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> Any: '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : str = emb.weight.shape UpperCAmelCase__ : List[Any] = nn.Linear(snake_case , snake_case , bias=snake_case ) UpperCAmelCase__ : Optional[Any] = emb.weight.data return lin_layer def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : int )-> Optional[int]: '''simple docstring''' UpperCAmelCase__ : Dict = torch.load(snake_case , map_location="cpu" ) UpperCAmelCase__ : int = mam_aaa["args"] UpperCAmelCase__ : int = mam_aaa["model"] UpperCAmelCase__ : Union[str, Any] = state_dict["decoder.output_projection.weight"] remove_ignore_keys_(snake_case ) rename_keys(snake_case ) UpperCAmelCase__ : List[str] = state_dict["decoder.embed_tokens.weight"].shape[0] UpperCAmelCase__ : Optional[Any] = args.share_decoder_input_output_embed UpperCAmelCase__ : int = [int(snake_case ) for i in args.conv_kernel_sizes.split("," )] UpperCAmelCase__ : Tuple = SpeechaTextConfig( vocab_size=snake_case , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(snake_case ) , conv_channels=args.conv_channels , conv_kernel_sizes=snake_case , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=snake_case , num_beams=5 , max_length=200 , use_cache=snake_case , decoder_start_token_id=2 , early_stopping=snake_case , ) UpperCAmelCase__ : Optional[int] = SpeechaTextForConditionalGeneration(snake_case ) UpperCAmelCase__ , UpperCAmelCase__ : Dict = model.model.load_state_dict(snake_case , strict=snake_case ) if len(snake_case ) > 0 and not set(snake_case ) <= { "encoder.embed_positions.weights", "decoder.embed_positions.weights", }: raise ValueError( "Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing," f' but all the following weights are missing {missing}' ) if tie_embeds: UpperCAmelCase__ : Union[str, Any] = make_linear_from_emb(model.model.decoder.embed_tokens ) else: UpperCAmelCase__ : List[Any] = lm_head_weights model.save_pretrained(snake_case ) if __name__ == "__main__": _lowerCAmelCase : Union[str, Any] = 
argparse.ArgumentParser() # Required parameters parser.add_argument("""--fairseq_path""", type=str, help="""Path to the fairseq model (.pt) file.""") parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""") _lowerCAmelCase : Union[str, Any] = parser.parse_args() convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
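# What make_linear_from_emb above does, shown standalone: the output projection
# shares storage with the embedding matrix, so logits are dot products against
# the embedding rows (the usual weight-tying trick).
import torch
from torch import nn

emb = nn.Embedding(10, 4)
lin_layer = nn.Linear(4, 10, bias=False)
lin_layer.weight.data = emb.weight.data
hidden = torch.randn(2, 4)
logits = lin_layer(hidden)  # shape (2, 10), one score per vocabulary entry
assert torch.equal(lin_layer.weight, emb.weight)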
298
"""simple docstring""" import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =XLMTokenizer SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase__ : Optional[int] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] UpperCAmelCase__ : Any = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase__ : Tuple = ["l o 123", "lo w 1456", "e r</w> 1789", ""] UpperCAmelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" ) as fp: fp.write(json.dumps(snake_case__ ) ) with open(self.merges_file , "w" ) as fp: fp.write("\n".join(snake_case__ ) ) def __a ( self : Union[str, Any] , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = "lower newer" UpperCAmelCase__ : Optional[Any] = "lower newer" return input_text, output_text def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = XLMTokenizer(self.vocab_file , self.merges_file ) UpperCAmelCase__ : List[Any] = "lower" UpperCAmelCase__ : Any = ["low", "er</w>"] UpperCAmelCase__ : Any = tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) UpperCAmelCase__ : Optional[Any] = tokens + ["<unk>"] UpperCAmelCase__ : List[Any] = [1_4, 1_5, 2_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" ) UpperCAmelCase__ : str = tokenizer.encode("sequence builders" , add_special_tokens=snake_case__ ) UpperCAmelCase__ : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case__ ) UpperCAmelCase__ : Any = tokenizer.build_inputs_with_special_tokens(snake_case__ ) UpperCAmelCase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
298
1
"""simple docstring""" from ..utils import ( OptionalDependencyNotAvailable, is_flax_available, is_scipy_available, is_torch_available, is_torchsde_available, ) try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_pt_objects import * # noqa F403 else: from .scheduling_consistency_models import CMStochasticIterativeScheduler from .scheduling_ddim import DDIMScheduler from .scheduling_ddim_inverse import DDIMInverseScheduler from .scheduling_ddim_parallel import DDIMParallelScheduler from .scheduling_ddpm import DDPMScheduler from .scheduling_ddpm_parallel import DDPMParallelScheduler from .scheduling_deis_multistep import DEISMultistepScheduler from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler from .scheduling_euler_discrete import EulerDiscreteScheduler from .scheduling_heun_discrete import HeunDiscreteScheduler from .scheduling_ipndm import IPNDMScheduler from .scheduling_k_dpm_2_ancestral_discrete import KDPMaAncestralDiscreteScheduler from .scheduling_k_dpm_2_discrete import KDPMaDiscreteScheduler from .scheduling_karras_ve import KarrasVeScheduler from .scheduling_pndm import PNDMScheduler from .scheduling_repaint import RePaintScheduler from .scheduling_sde_ve import ScoreSdeVeScheduler from .scheduling_sde_vp import ScoreSdeVpScheduler from .scheduling_unclip import UnCLIPScheduler from .scheduling_unipc_multistep import UniPCMultistepScheduler from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin from .scheduling_vq_diffusion import VQDiffusionScheduler try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_flax_objects import * # noqa F403 else: from .scheduling_ddim_flax import FlaxDDIMScheduler from .scheduling_ddpm_flax import FlaxDDPMScheduler from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler from .scheduling_pndm_flax import FlaxPNDMScheduler from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler from .scheduling_utils_flax import ( FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, broadcast_to_shape_from_left, ) try: if not (is_torch_available() and is_scipy_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_scipy_objects import * # noqa F403 else: from .scheduling_lms_discrete import LMSDiscreteScheduler try: if not (is_torch_available() and is_torchsde_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403 else: from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
298
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class lowerCAmelCase__ : def __init__( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : str=sys.maxsize ): '''simple docstring''' UpperCAmelCase__ : Any = "bilinear" UpperCAmelCase__ : Any = max_size UpperCAmelCase__ : Any = short_edge_length def __call__( self : Dict , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = [] for img in imgs: UpperCAmelCase__ , UpperCAmelCase__ : int = img.shape[:2] # later: provide list and randomly choose index for resize UpperCAmelCase__ : Dict = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img UpperCAmelCase__ : Dict = size * 1.0 / min(snake_case__ , snake_case__ ) if h < w: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = size, scale * w else: UpperCAmelCase__ , UpperCAmelCase__ : int = scale * h, size if max(snake_case__ , snake_case__ ) > self.max_size: UpperCAmelCase__ : Union[str, Any] = self.max_size * 1.0 / max(snake_case__ , snake_case__ ) UpperCAmelCase__ : List[str] = newh * scale UpperCAmelCase__ : int = neww * scale UpperCAmelCase__ : List[Any] = int(neww + 0.5 ) UpperCAmelCase__ : Optional[Any] = int(newh + 0.5 ) if img.dtype == np.uinta: UpperCAmelCase__ : Any = Image.fromarray(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) UpperCAmelCase__ : Optional[int] = np.asarray(snake_case__ ) else: UpperCAmelCase__ : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw UpperCAmelCase__ : Tuple = nn.functional.interpolate( snake_case__ , (newh, neww) , mode=self.interp_method , align_corners=snake_case__ ).squeeze(0 ) img_augs.append(snake_case__ ) return img_augs class lowerCAmelCase__ : def __init__( self : Optional[int] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) UpperCAmelCase__ : Any = cfg.INPUT.FORMAT UpperCAmelCase__ : Optional[Any] = cfg.SIZE_DIVISIBILITY UpperCAmelCase__ : str = cfg.PAD_VALUE UpperCAmelCase__ : List[Any] = cfg.INPUT.MAX_SIZE_TEST UpperCAmelCase__ : Dict = cfg.MODEL.DEVICE UpperCAmelCase__ : Optional[int] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase__ : str = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase__ : List[str] = lambda snake_case__ : (x - self.pixel_mean) / self.pixel_std def __a ( self : Optional[int] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = tuple(max(snake_case__ ) for s in zip(*[img.shape for img in images] ) ) UpperCAmelCase__ : Tuple = [im.shape[-2:] for im in images] UpperCAmelCase__ : int = [ nn.functional.pad( snake_case__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(snake_case__ , snake_case__ ) ] return torch.stack(snake_case__ ), torch.tensor(snake_case__ ) def __call__( self : str , snake_case__ : int , snake_case__ : int=False ): '''simple docstring''' with torch.no_grad(): if not isinstance(snake_case__ , snake_case__ ): UpperCAmelCase__ : Dict = [images] if single_image: assert len(snake_case__ ) == 1 for i in range(len(snake_case__ ) ): if 
isinstance(images[i] , torch.Tensor ): images.insert(snake_case__ , images.pop(snake_case__ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( snake_case__ , torch.as_tensor(img_tensorize(images.pop(snake_case__ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge UpperCAmelCase__ : Optional[Any] = torch.tensor([im.shape[:2] for im in images] ) UpperCAmelCase__ : Tuple = self.aug(snake_case__ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic UpperCAmelCase__ : Optional[int] = [self.normalizer(snake_case__ ) for x in images] # now pad them to do the following operations UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.pad(snake_case__ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad UpperCAmelCase__ : Tuple = torch.true_divide(snake_case__ , snake_case__ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : str )-> List[Any]: '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple[int, int] )-> int: '''simple docstring''' assert torch.isfinite(snake_case ).all(), "Box tensor contains infinite or NaN!" UpperCAmelCase__ , UpperCAmelCase__ : Dict = box_size tensor[:, 0].clamp_(min=0 , max=snake_case ) tensor[:, 1].clamp_(min=0 , max=snake_case ) tensor[:, 2].clamp_(min=0 , max=snake_case ) tensor[:, 3].clamp_(min=0 , max=snake_case )
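# The shortest-edge resize arithmetic from ResizeShortestEdge.__call__ above,
# traced for one concrete case: a 480x640 (h x w) image, target short side 384,
# max_size 1000 (numbers chosen for illustration only).
h, w, size, max_size = 480, 640, 384, 1000
scale = size * 1.0 / min(h, w)
newh, neww = (size, scale * w) if h < w else (scale * h, size)
if max(newh, neww) > max_size:
    rescale = max_size * 1.0 / max(newh, neww)
    newh, neww = newh * rescale, neww * rescale
newh, neww = int(newh + 0.5), int(neww + 0.5)
print(newh, neww)  # 384 512: aspect ratio preserved, short side hits the target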
298
1
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : int = {"""vocab_file""": """spiece.model"""} _lowerCAmelCase : Union[str, Any] = { """vocab_file""": { """bert_for_seq_generation""": ( """https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model""" ), } } _lowerCAmelCase : int = {"""bert_for_seq_generation""": 512} class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ =[] SCREAMING_SNAKE_CASE_ =['''input_ids''', '''attention_mask'''] def __init__( self : Dict , snake_case__ : List[Any] , snake_case__ : Tuple="<s>" , snake_case__ : Tuple="</s>" , snake_case__ : Tuple="<unk>" , snake_case__ : Optional[int]="<pad>" , snake_case__ : Dict="<::::>" , snake_case__ : Optional[Dict[str, Any]] = None , **snake_case__ : Tuple , ): '''simple docstring''' UpperCAmelCase__ : List[Any] = {} if sp_model_kwargs is None else sp_model_kwargs # Add extra_ids to the special token list super().__init__( bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , pad_token=snake_case__ , sep_token=snake_case__ , sp_model_kwargs=self.sp_model_kwargs , **snake_case__ , ) UpperCAmelCase__ : Union[str, Any] = vocab_file UpperCAmelCase__ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(snake_case__ ) @property def __a ( self : List[str] ): '''simple docstring''' return self.sp_model.get_piece_size() def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : str = {self.convert_ids_to_tokens(snake_case__ ): i for i in range(self.vocab_size )} vocab.update(self.added_tokens_encoder ) return vocab def __getstate__( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.__dict__.copy() UpperCAmelCase__ : Optional[int] = None return state def __setstate__( self : List[Any] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = d # for backward compatibility if not hasattr(self , "sp_model_kwargs" ): UpperCAmelCase__ : List[Any] = {} UpperCAmelCase__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs ) self.sp_model.Load(self.vocab_file ) def __a ( self : Any , snake_case__ : str ): '''simple docstring''' return self.sp_model.encode(snake_case__ , out_type=snake_case__ ) def __a ( self : Union[str, Any] , snake_case__ : str ): '''simple docstring''' return self.sp_model.piece_to_id(snake_case__ ) def __a ( self : Dict , snake_case__ : Any ): '''simple docstring''' UpperCAmelCase__ : Dict = self.sp_model.IdToPiece(snake_case__ ) return token def __a ( self : int , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Any = [] UpperCAmelCase__ : Dict = "" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(snake_case__ ) + token UpperCAmelCase__ : List[str] = [] else: current_sub_tokens.append(snake_case__ ) out_string += self.sp_model.decode(snake_case__ ) return out_string.strip() def __a ( self : int , snake_case__ : str , snake_case__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(snake_case__ ): 
logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return UpperCAmelCase__ : List[str] = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ) and os.path.isfile(self.vocab_file ): copyfile(self.vocab_file , snake_case__ ) elif not os.path.isfile(self.vocab_file ): with open(snake_case__ , "wb" ) as fi: UpperCAmelCase__ : Optional[Any] = self.sp_model.serialized_model_proto() fi.write(snake_case__ ) return (out_vocab_file,)
298
"""simple docstring""" import qiskit def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int )-> qiskit.result.counts.Counts: '''simple docstring''' UpperCAmelCase__ : str = qiskit.Aer.get_backend("aer_simulator" ) UpperCAmelCase__ : Optional[int] = qiskit.QuantumCircuit(4 , 2 ) # encode inputs in qubits 0 and 1 if bita == 1: qc_ha.x(0 ) if bita == 1: qc_ha.x(1 ) qc_ha.barrier() # use cnots to write XOR of the inputs on qubit2 qc_ha.cx(0 , 2 ) qc_ha.cx(1 , 2 ) # use ccx / toffoli gate to write AND of the inputs on qubit3 qc_ha.ccx(0 , 1 , 3 ) qc_ha.barrier() # extract outputs qc_ha.measure(2 , 0 ) # extract XOR value qc_ha.measure(3 , 1 ) # extract AND value # Execute the circuit on the qasm simulator UpperCAmelCase__ : Optional[int] = qiskit.execute(snake_case , snake_case , shots=1000 ) # Return the histogram data of the results of the experiment return job.result().get_counts(snake_case ) if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = half_adder(1, 1) print(F"""Half Adder Output Qubit Counts: {counts}""")
298
1
"""simple docstring""" _lowerCAmelCase : Any = frozenset( [ """prompt""", """height""", """width""", """guidance_scale""", """negative_prompt""", """prompt_embeds""", """negative_prompt_embeds""", """cross_attention_kwargs""", ] ) _lowerCAmelCase : Optional[int] = frozenset(["""prompt""", """negative_prompt"""]) _lowerCAmelCase : List[str] = frozenset([]) _lowerCAmelCase : Optional[int] = frozenset(["""image"""]) _lowerCAmelCase : Union[str, Any] = frozenset( [ """image""", """height""", """width""", """guidance_scale""", ] ) _lowerCAmelCase : Union[str, Any] = frozenset(["""image"""]) _lowerCAmelCase : Optional[Any] = frozenset( [ """prompt""", """image""", """height""", """width""", """guidance_scale""", """negative_prompt""", """prompt_embeds""", """negative_prompt_embeds""", ] ) _lowerCAmelCase : Tuple = frozenset(["""prompt""", """image""", """negative_prompt"""]) _lowerCAmelCase : Optional[Any] = frozenset( [ # Text guided image variation with an image mask """prompt""", """image""", """mask_image""", """height""", """width""", """guidance_scale""", """negative_prompt""", """prompt_embeds""", """negative_prompt_embeds""", ] ) _lowerCAmelCase : Optional[Any] = frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""]) _lowerCAmelCase : int = frozenset( [ # image variation with an image mask """image""", """mask_image""", """height""", """width""", """guidance_scale""", ] ) _lowerCAmelCase : int = frozenset(["""image""", """mask_image"""]) _lowerCAmelCase : Tuple = frozenset( [ """example_image""", """image""", """mask_image""", """height""", """width""", """guidance_scale""", ] ) _lowerCAmelCase : Optional[int] = frozenset(["""example_image""", """image""", """mask_image"""]) _lowerCAmelCase : Union[str, Any] = frozenset(["""class_labels"""]) _lowerCAmelCase : Optional[Any] = frozenset(["""class_labels"""]) _lowerCAmelCase : List[str] = frozenset(["""batch_size"""]) _lowerCAmelCase : Any = frozenset([]) _lowerCAmelCase : Optional[Any] = frozenset(["""batch_size"""]) _lowerCAmelCase : List[str] = frozenset([]) _lowerCAmelCase : int = frozenset( [ """prompt""", """audio_length_in_s""", """guidance_scale""", """negative_prompt""", """prompt_embeds""", """negative_prompt_embeds""", """cross_attention_kwargs""", ] ) _lowerCAmelCase : Optional[Any] = frozenset(["""prompt""", """negative_prompt"""]) _lowerCAmelCase : Union[str, Any] = frozenset(["""input_tokens"""]) _lowerCAmelCase : Optional[int] = frozenset(["""input_tokens"""])
298
"""simple docstring""" from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) _lowerCAmelCase : Union[str, Any] = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''efficientformer''' def __init__( self : List[Any] , snake_case__ : List[int] = [3, 2, 6, 4] , snake_case__ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , snake_case__ : List[bool] = [True, True, True, True] , snake_case__ : int = 4_4_8 , snake_case__ : int = 3_2 , snake_case__ : int = 4 , snake_case__ : int = 7 , snake_case__ : int = 5 , snake_case__ : int = 8 , snake_case__ : int = 4 , snake_case__ : float = 0.0 , snake_case__ : int = 1_6 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : float = 0.0 , snake_case__ : int = 1 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : float = 1e-5 , snake_case__ : str = "gelu" , snake_case__ : float = 0.02 , snake_case__ : float = 1e-12 , snake_case__ : int = 2_2_4 , snake_case__ : float = 1e-05 , **snake_case__ : str , ): '''simple docstring''' super().__init__(**snake_case__ ) UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : List[str] = hidden_sizes UpperCAmelCase__ : Union[str, Any] = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : List[Any] = initializer_range UpperCAmelCase__ : List[Any] = layer_norm_eps UpperCAmelCase__ : Optional[int] = patch_size UpperCAmelCase__ : Tuple = num_channels UpperCAmelCase__ : Optional[int] = depths UpperCAmelCase__ : Union[str, Any] = mlp_expansion_ratio UpperCAmelCase__ : Dict = downsamples UpperCAmelCase__ : Any = dim UpperCAmelCase__ : str = key_dim UpperCAmelCase__ : List[Any] = attention_ratio UpperCAmelCase__ : Optional[Any] = resolution UpperCAmelCase__ : Optional[Any] = pool_size UpperCAmelCase__ : Any = downsample_patch_size UpperCAmelCase__ : int = downsample_stride UpperCAmelCase__ : Dict = downsample_pad UpperCAmelCase__ : List[Any] = drop_path_rate UpperCAmelCase__ : Optional[Any] = num_metaad_blocks UpperCAmelCase__ : List[str] = distillation UpperCAmelCase__ : Dict = use_layer_scale UpperCAmelCase__ : List[Any] = layer_scale_init_value UpperCAmelCase__ : Optional[Any] = image_size UpperCAmelCase__ : Optional[int] = batch_norm_eps
298
1
"""simple docstring""" import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer _lowerCAmelCase : List[Any] = logging.get_logger(__name__) _lowerCAmelCase : str = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""} _lowerCAmelCase : Tuple = { """vocab_file""": { """yjernite/retribert-base-uncased""": ( """https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt""" ), }, """tokenizer_file""": { """yjernite/retribert-base-uncased""": ( """https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json""" ), }, } _lowerCAmelCase : Tuple = { """yjernite/retribert-base-uncased""": 512, } _lowerCAmelCase : List[str] = { """yjernite/retribert-base-uncased""": {"""do_lower_case""": True}, } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ =PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE_ =RetriBertTokenizer SCREAMING_SNAKE_CASE_ =['''input_ids''', '''attention_mask'''] def __init__( self : Dict , snake_case__ : Dict=None , snake_case__ : Any=None , snake_case__ : int=True , snake_case__ : Tuple="[UNK]" , snake_case__ : Union[str, Any]="[SEP]" , snake_case__ : Any="[PAD]" , snake_case__ : Optional[Any]="[CLS]" , snake_case__ : Union[str, Any]="[MASK]" , snake_case__ : List[str]=True , snake_case__ : Tuple=None , **snake_case__ : int , ): '''simple docstring''' super().__init__( snake_case__ , tokenizer_file=snake_case__ , do_lower_case=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , pad_token=snake_case__ , cls_token=snake_case__ , mask_token=snake_case__ , tokenize_chinese_chars=snake_case__ , strip_accents=snake_case__ , **snake_case__ , ) UpperCAmelCase__ : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , snake_case__ ) != do_lower_case or normalizer_state.get("strip_accents" , snake_case__ ) != strip_accents or normalizer_state.get("handle_chinese_chars" , snake_case__ ) != tokenize_chinese_chars ): UpperCAmelCase__ : str = getattr(snake_case__ , normalizer_state.pop("type" ) ) UpperCAmelCase__ : Optional[Any] = do_lower_case UpperCAmelCase__ : Any = strip_accents UpperCAmelCase__ : Optional[int] = tokenize_chinese_chars UpperCAmelCase__ : List[str] = normalizer_class(**snake_case__ ) UpperCAmelCase__ : List[str] = do_lower_case def __a ( self : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any]=None ): '''simple docstring''' UpperCAmelCase__ : str = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __a ( self : Tuple , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase__ : str = [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __a ( self : List[Any] , snake_case__ : str , snake_case__ : Optional[str] = None ): '''simple docstring''' UpperCAmelCase__ : Dict = self._tokenizer.model.save(snake_case__ , name=snake_case__ ) return tuple(snake_case__ )
298
"""simple docstring""" import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def SCREAMING_SNAKE_CASE__ ( snake_case : Dataset , snake_case : Dict[str, str] )-> Any: '''simple docstring''' UpperCAmelCase__ : str = args.log_outputs UpperCAmelCase__ : str = "_".join(args.dataset.split("/" ) + [args.config, args.split] ) # load metric UpperCAmelCase__ : List[str] = load_metric("wer" ) UpperCAmelCase__ : Tuple = load_metric("cer" ) # compute metrics UpperCAmelCase__ : List[str] = wer.compute(references=result["target"] , predictions=result["prediction"] ) UpperCAmelCase__ : Tuple = cer.compute(references=result["target"] , predictions=result["prediction"] ) # print & log results UpperCAmelCase__ : Union[str, Any] = f'WER: {wer_result}\nCER: {cer_result}' print(snake_case ) with open(f'{dataset_id}_eval_results.txt' , "w" ) as f: f.write(snake_case ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: UpperCAmelCase__ : str = f'log_{dataset_id}_predictions.txt' UpperCAmelCase__ : List[str] = f'log_{dataset_id}_targets.txt' with open(snake_case , "w" ) as p, open(snake_case , "w" ) as t: # mapping function to write output def write_to_file(snake_case : List[Any] , snake_case : List[str] ): p.write(f'{i}' + "\n" ) p.write(batch["prediction"] + "\n" ) t.write(f'{i}' + "\n" ) t.write(batch["target"] + "\n" ) result.map(snake_case , with_indices=snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> str: '''simple docstring''' UpperCAmelCase__ : str = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training UpperCAmelCase__ : str = re.sub(snake_case , "" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
UpperCAmelCase__ : Tuple = ["\n\n", "\n", " ", " "] for t in token_sequences_to_ignore: UpperCAmelCase__ : List[Any] = " ".join(text.split(snake_case ) ) return text def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor UpperCAmelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(args.model_id ) UpperCAmelCase__ : str = feature_extractor.sampling_rate # resample audio UpperCAmelCase__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case ) ) # load eval pipeline if args.device is None: UpperCAmelCase__ : List[str] = 0 if torch.cuda.is_available() else -1 UpperCAmelCase__ : Optional[int] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(snake_case : Any ): UpperCAmelCase__ : List[str] = asr( batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) UpperCAmelCase__ : List[Any] = prediction["text"] UpperCAmelCase__ : Optional[int] = normalize_text(batch["sentence"] ) return batch # run inference on all examples UpperCAmelCase__ : Dict = dataset.map(snake_case , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case , snake_case ) if __name__ == "__main__": _lowerCAmelCase : Any = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) _lowerCAmelCase : Tuple = parser.parse_args() main(args)
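# The normalisation from normalize_text above applied to a sample sentence:
# lowercase, strip the ignored punctuation, then collapse runs of whitespace.
import re

chars_to_ignore_regex = "[,?.!\-\;\:\"“%‘”�—’…–]"  # noqa: W605
text = "Hello, World! How are you?"
text = re.sub(chars_to_ignore_regex, "", text.lower())
text = " ".join(text.split())
print(text)  # hello world how are you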
298
1
"""simple docstring""" import json import os import shutil import tempfile import unittest import numpy as np import pytest from transformers import CLIPTokenizer, CLIPTokenizerFast from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES from transformers.testing_utils import require_vision from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available if is_vision_available(): from PIL import Image from transformers import OwlViTImageProcessor, OwlViTProcessor @require_vision class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = tempfile.mkdtemp() # fmt: off UpperCAmelCase__ : List[str] = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"] # fmt: on UpperCAmelCase__ : Dict = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase__ : int = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""] UpperCAmelCase__ : Optional[int] = {"unk_token": "<unk>"} UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" , encoding="utf-8" ) as fp: fp.write(json.dumps(snake_case__ ) + "\n" ) with open(self.merges_file , "w" , encoding="utf-8" ) as fp: fp.write("\n".join(snake_case__ ) ) UpperCAmelCase__ : Union[str, Any] = { "do_resize": True, "size": 2_0, "do_center_crop": True, "crop_size": 1_8, "do_normalize": True, "image_mean": [0.4814_5466, 0.457_8275, 0.4082_1073], "image_std": [0.2686_2954, 0.2613_0258, 0.2757_7711], } UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , snake_case__ ) with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp: json.dump(snake_case__ , snake_case__ ) def __a ( self : Dict , **snake_case__ : Any ): '''simple docstring''' return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **snake_case__ ) def __a ( self : Tuple , **snake_case__ : Any ): '''simple docstring''' return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" 
, **snake_case__ ) def __a ( self : Optional[Any] , **snake_case__ : Optional[Any] ): '''simple docstring''' return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case__ ) def __a ( self : Optional[Any] ): '''simple docstring''' shutil.rmtree(self.tmpdirname ) def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[Any] = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )] UpperCAmelCase__ : List[str] = [Image.fromarray(np.moveaxis(snake_case__ , 0 , -1 ) ) for x in image_inputs] return image_inputs def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_tokenizer() UpperCAmelCase__ : int = self.get_rust_tokenizer() UpperCAmelCase__ : Dict = self.get_image_processor() UpperCAmelCase__ : str = OwlViTProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) processor_slow.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case__ ) UpperCAmelCase__ : Union[str, Any] = OwlViTProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) processor_fast.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : Union[str, Any] = OwlViTProcessor.from_pretrained(self.tmpdirname ) self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() ) self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() ) self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() ) self.assertIsInstance(processor_slow.tokenizer , snake_case__ ) self.assertIsInstance(processor_fast.tokenizer , snake_case__ ) self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() ) self.assertIsInstance(processor_slow.image_processor , snake_case__ ) self.assertIsInstance(processor_fast.image_processor , snake_case__ ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() ) processor.save_pretrained(self.tmpdirname ) UpperCAmelCase__ : Optional[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" ) UpperCAmelCase__ : int = self.get_image_processor(do_normalize=snake_case__ ) UpperCAmelCase__ : Optional[int] = OwlViTProcessor.from_pretrained( self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=snake_case__ ) self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() ) self.assertIsInstance(processor.tokenizer , snake_case__ ) self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() ) self.assertIsInstance(processor.image_processor , snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.get_image_processor() UpperCAmelCase__ : Dict = self.get_tokenizer() UpperCAmelCase__ : Union[str, Any] = OwlViTProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) UpperCAmelCase__ : str = self.prepare_image_inputs() UpperCAmelCase__ : Optional[Any] = image_processor(snake_case__ , return_tensors="np" ) UpperCAmelCase__ : int = processor(images=snake_case__ , return_tensors="np" ) for key in input_image_proc.keys(): self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 
self.get_image_processor() UpperCAmelCase__ : Optional[int] = self.get_tokenizer() UpperCAmelCase__ : List[str] = OwlViTProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) UpperCAmelCase__ : List[Any] = "lower newer" UpperCAmelCase__ : Union[str, Any] = processor(text=snake_case__ , return_tensors="np" ) UpperCAmelCase__ : Optional[Any] = tokenizer(snake_case__ , return_tensors="np" ) for key in encoded_tok.keys(): self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.get_image_processor() UpperCAmelCase__ : List[str] = self.get_tokenizer() UpperCAmelCase__ : List[Any] = OwlViTProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) UpperCAmelCase__ : Optional[int] = "lower newer" UpperCAmelCase__ : Optional[int] = self.prepare_image_inputs() UpperCAmelCase__ : Optional[Any] = processor(text=snake_case__ , images=snake_case__ ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(snake_case__ ): processor() def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Tuple = "google/owlvit-base-patch32" UpperCAmelCase__ : Union[str, Any] = OwlViTProcessor.from_pretrained(snake_case__ ) UpperCAmelCase__ : Tuple = ["cat", "nasa badge"] UpperCAmelCase__ : Any = processor(text=snake_case__ ) UpperCAmelCase__ : Dict = 1_6 self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (2, seq_length) ) # test if it raises when no input is passed with pytest.raises(snake_case__ ): processor() def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[str] = "google/owlvit-base-patch32" UpperCAmelCase__ : Dict = OwlViTProcessor.from_pretrained(snake_case__ ) UpperCAmelCase__ : List[str] = [["cat", "nasa badge"], ["person"]] UpperCAmelCase__ : Union[str, Any] = processor(text=snake_case__ ) UpperCAmelCase__ : Any = 1_6 UpperCAmelCase__ : Optional[Any] = len(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = max([len(snake_case__ ) for texts in input_texts] ) self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) ) # test if it raises when no input is passed with pytest.raises(snake_case__ ): processor() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = "google/owlvit-base-patch32" UpperCAmelCase__ : List[Any] = OwlViTProcessor.from_pretrained(snake_case__ ) UpperCAmelCase__ : Dict = ["cat", "nasa badge"] UpperCAmelCase__ : Tuple = processor(text=snake_case__ ) UpperCAmelCase__ : Union[str, Any] = 1_6 UpperCAmelCase__ : List[Any] = inputs["input_ids"] UpperCAmelCase__ : str = [ [4_9_4_0_6, 2_3_6_8, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [4_9_4_0_6, 6_8_4_1, 1_1_3_0_1, 4_9_4_0_7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], ] self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] ) self.assertEqual(inputs["input_ids"].shape , (2, seq_length) ) self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] ) self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] ) def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_image_processor() UpperCAmelCase__ : Optional[int] = self.get_tokenizer() UpperCAmelCase__ : Union[str, Any] = 
OwlViTProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) UpperCAmelCase__ : List[str] = self.prepare_image_inputs() UpperCAmelCase__ : Optional[Any] = self.prepare_image_inputs() UpperCAmelCase__ : int = processor(images=snake_case__ , query_images=snake_case__ ) self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] ) # test if it raises when no input is passed with pytest.raises(snake_case__ ): processor() def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_image_processor() UpperCAmelCase__ : Tuple = self.get_tokenizer() UpperCAmelCase__ : Dict = OwlViTProcessor(tokenizer=snake_case__ , image_processor=snake_case__ ) UpperCAmelCase__ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]] UpperCAmelCase__ : str = processor.batch_decode(snake_case__ ) UpperCAmelCase__ : Optional[Any] = tokenizer.batch_decode(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ )
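# End-to-end run matching the processor tests above; this pulls the public
# google/owlvit-base-patch32 checkpoint, so it downloads weights on first use.
import numpy as np
from PIL import Image

from transformers import OwlViTProcessor

processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
image = Image.fromarray(np.zeros((30, 400, 3), dtype=np.uint8))
inputs = processor(text=["cat", "nasa badge"], images=image, return_tensors="np")
print(sorted(inputs.keys()))      # ['attention_mask', 'input_ids', 'pixel_values']
print(inputs["input_ids"].shape)  # (2, 16), one padded query per text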
298
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowerCAmelCase__ : def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str=1_0_0 , snake_case__ : str=1_3 , snake_case__ : Optional[int]=3_0 , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]=True , snake_case__ : Any=3_2 , snake_case__ : List[str]=4 , snake_case__ : Any=4 , snake_case__ : Dict=3_7 , snake_case__ : str="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[Any]=1_0 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : Tuple=None , snake_case__ : Tuple=[0, 1, 2, 3] , ): '''simple docstring''' UpperCAmelCase__ : int = parent UpperCAmelCase__ : List[str] = 1_0_0 UpperCAmelCase__ : List[Any] = batch_size UpperCAmelCase__ : int = image_size UpperCAmelCase__ : List[Any] = patch_size UpperCAmelCase__ : List[Any] = num_channels UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : str = use_labels UpperCAmelCase__ : Any = hidden_size UpperCAmelCase__ : Dict = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Any = hidden_act UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : str = attention_probs_dropout_prob UpperCAmelCase__ : Optional[int] = type_sequence_label_size UpperCAmelCase__ : Any = initializer_range UpperCAmelCase__ : Any = scope UpperCAmelCase__ : Optional[Any] = out_indices UpperCAmelCase__ : int = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase__ : List[Any] = (image_size // patch_size) ** 2 UpperCAmelCase__ : Optional[int] = num_patches + 1 def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : str = None UpperCAmelCase__ : Optional[int] = None if self.use_labels: UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) UpperCAmelCase__ : Tuple = self.get_config() return config, pixel_values, labels, pixel_labels def __a ( self : int ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads 
, intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def __a ( self : int , snake_case__ : str , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Any ): '''simple docstring''' UpperCAmelCase__ : int = BeitForMaskedImageModeling(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[Any] = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def __a ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase__ : Any = 1 UpperCAmelCase__ : List[Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase__ : Optional[Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.num_labels UpperCAmelCase__ : int = BeitForSemanticSegmentation(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = model(snake_case__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = config_and_inputs UpperCAmelCase__ : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( { '''feature-extraction''': BeitModel, '''image-classification''': BeitForImageClassification, '''image-segmentation''': BeitForSemanticSegmentation, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False 
SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitModelTester(self ) UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 ) def __a ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def __a ( self : List[Any] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def __a ( self : List[str] ): '''simple docstring''' pass def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Dict = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase__ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(snake_case__ ) UpperCAmelCase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : str = [*signature.parameters.keys()] UpperCAmelCase__ : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' if not self.model_tester.is_training: return UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Optional[int] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]: continue UpperCAmelCase__ : Optional[Any] = model_class(snake_case__ ) model.to(snake_case__ ) model.train() UpperCAmelCase__ : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Tuple = model(**snake_case__ ).loss loss.backward() def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : List[str] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(snake_case__ ), 
BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue UpperCAmelCase__ : List[Any] = model_class(snake_case__ ) model.gradient_checkpointing_enable() model.to(snake_case__ ) model.train() UpperCAmelCase__ : Dict = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Optional[Any] = model(**snake_case__ ).loss loss.backward() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Union[str, Any] = _config_zero_init(snake_case__ ) for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(config=snake_case__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) @slow def __a ( self : Any ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Optional[Any] = BeitModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def __a ( self : Union[str, Any] ): '''simple docstring''' return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(snake_case__ ) UpperCAmelCase__ : int = self.default_image_processor UpperCAmelCase__ : List[Any] = prepare_img() UpperCAmelCase__ : Dict = image_processor(images=snake_case__ , return_tensors="pt" ).pixel_values.to(snake_case__ ) # prepare bool_masked_pos UpperCAmelCase__ : Union[str, Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ ) UpperCAmelCase__ : str = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 1_9_6, 8_1_9_2) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Any = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) ) @slow def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Dict = prepare_img() UpperCAmelCase__ : Tuple = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Union[str, Any] = model(**snake_case__ ) UpperCAmelCase__ : Any = outputs.logits # verify the logits UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1_0_0_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Optional[Any] 
= torch.tensor([-1.2385, -1.0987, -1.0108] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : List[str] = 2_8_1 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Any = prepare_img() UpperCAmelCase__ : Union[str, Any] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[Any] = model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 2_1_8_4_1) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : int = torch.tensor([1.6881, -0.2787, 0.5901] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : Any = 2_3_9_6 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : List[Any] = model.to(snake_case__ ) UpperCAmelCase__ : int = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Any = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : List[Any] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : str = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[str] = model(**snake_case__ ) UpperCAmelCase__ : Dict = outputs.logits # verify the logits UpperCAmelCase__ : Any = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : List[str] = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: UpperCAmelCase__ : Optional[Any] = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=snake_case__ , ) else: UpperCAmelCase__ : int = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=snake_case__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 ) ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : Any = model.to(snake_case__ ) UpperCAmelCase__ : Dict = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : Optional[int] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : Optional[int] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] 
= model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits.detach().cpu() UpperCAmelCase__ : str = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(5_0_0, 3_0_0)] ) UpperCAmelCase__ : List[Any] = torch.Size((5_0_0, 3_0_0) ) self.assertEqual(segmentation[0].shape , snake_case__ ) UpperCAmelCase__ : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ ) UpperCAmelCase__ : int = torch.Size((1_6_0, 1_6_0) ) self.assertEqual(segmentation[0].shape , snake_case__ )
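For orientation alongside the fixed-slice assertions above, here is a minimal standalone sketch of the semantic-segmentation flow those integration tests exercise. The checkpoint name and fixture image path come from the tests; the rest is illustrative, not part of the test suite.

import torch
from PIL import Image
from transformers import BeitForSemanticSegmentation, BeitImageProcessor

image_processor = BeitImageProcessor.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
model.eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = image_processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# rescale the low-resolution logits back to the input size, as the
# post-processing test above does with explicit target_sizes
segmentation = image_processor.post_process_semantic_segmentation(
    outputs=outputs, target_sizes=[image.size[::-1]]
)
print(segmentation[0].shape)  # (height, width) label map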
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : list[int] , snake_case : int )-> int: '''simple docstring''' def count_of_possible_combinations(snake_case : int ) -> int: if target < 0: return 0 if target == 0: return 1 return sum(count_of_possible_combinations(target - item ) for item in array ) return count_of_possible_combinations(snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : list[int] , snake_case : int )-> int: '''simple docstring''' def count_of_possible_combinations_with_dp_array( snake_case : int , snake_case : list[int] ) -> int: if target < 0: return 0 if target == 0: return 1 if dp_array[target] != -1: return dp_array[target] UpperCAmelCase__ : str = sum( count_of_possible_combinations_with_dp_array(target - item , snake_case ) for item in array ) UpperCAmelCase__ : List[str] = answer return answer UpperCAmelCase__ : Dict = [-1] * (target + 1) return count_of_possible_combinations_with_dp_array(snake_case , snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : list[int] , snake_case : int )-> int: '''simple docstring''' UpperCAmelCase__ : Dict = [0] * (target + 1) UpperCAmelCase__ : int = 1 for i in range(1 , target + 1 ): for j in range(snake_case ): if i - array[j] >= 0: dp_array[i] += dp_array[i - array[j]] return dp_array[target] if __name__ == "__main__": import doctest doctest.testmod() _lowerCAmelCase : Optional[int] = 3 _lowerCAmelCase : Any = 5 _lowerCAmelCase : Optional[Any] = [1, 2, 5] print(combination_sum_iv(n, array, target))
"""simple docstring""" import functools def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str )-> int: '''simple docstring''' UpperCAmelCase__ : List[str] = len(snake_case ) UpperCAmelCase__ : str = len(snake_case ) @functools.cache def min_distance(snake_case : int , snake_case : int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa UpperCAmelCase__ : Optional[int] = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , snake_case ) , 1 + min_distance(snake_case , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger _lowerCAmelCase : Optional[int] = get_logger(__name__) _lowerCAmelCase : Any = Path(__file__).parent / """model_card_template.md""" _lowerCAmelCase : Dict = uuida().hex _lowerCAmelCase : Optional[int] = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : Optional[int] = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : int = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/""" def SCREAMING_SNAKE_CASE__ ( snake_case : Union[Dict, str, None] = None )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'; torch/{_torch_version}' if is_flax_available(): ua += f'; jax/{_jax_version}' ua += f'; flax/{_flax_version}' if is_onnx_available(): ua += f'; onnxruntime/{_onnxruntime_version}' # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(snake_case , snake_case ): ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() ) elif isinstance(snake_case , snake_case ): ua += "; " + user_agent return ua def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> List[str]: '''simple docstring''' if token is None: UpperCAmelCase__ : Optional[Any] = HfFolder.get_token() if organization is None: UpperCAmelCase__ : Tuple = whoami(snake_case )["name"] return f'{username}/{model_id}' else: return f'{organization}/{model_id}' def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : List[Any] )-> List[Any]: '''simple docstring''' if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." 
) if hasattr(snake_case , "local_rank" ) and args.local_rank not in [-1, 0]: return UpperCAmelCase__ : int = args.hub_token if hasattr(snake_case , "hub_token" ) else None UpperCAmelCase__ : Optional[Any] = get_full_repo_name(snake_case , token=snake_case ) UpperCAmelCase__ : Tuple = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=snake_case , model_name=snake_case , repo_name=snake_case , dataset_name=args.dataset_name if hasattr(snake_case , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(snake_case , "gradient_accumulation_steps" ) else None ) , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta1" ) else None , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(snake_case , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(snake_case , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(snake_case , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(snake_case , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(snake_case , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(snake_case , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(snake_case , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , ) UpperCAmelCase__ : List[str] = os.path.join(args.output_dir , "README.md" ) model_card.save(snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] , snake_case : Optional[str] = None )-> Tuple: '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash UpperCAmelCase__ : Dict = str(Path(snake_case ).as_posix() ) UpperCAmelCase__ : Optional[int] = re.search(r"snapshots/([^/]+)/" , snake_case ) if search is None: return None UpperCAmelCase__ : Dict = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(snake_case ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. 
_lowerCAmelCase : Dict = os.path.expanduser( os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface""")) ) _lowerCAmelCase : List[Any] = os.path.join(hf_cache_home, """diffusers""") def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> None: '''simple docstring''' if new_cache_dir is None: UpperCAmelCase__ : Union[str, Any] = DIFFUSERS_CACHE if old_cache_dir is None: UpperCAmelCase__ : str = old_diffusers_cache UpperCAmelCase__ : List[str] = Path(snake_case ).expanduser() UpperCAmelCase__ : Any = Path(snake_case ).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): UpperCAmelCase__ : Dict = new_cache_dir / old_blob_path.relative_to(snake_case ) new_blob_path.parent.mkdir(parents=snake_case , exist_ok=snake_case ) os.replace(snake_case , snake_case ) try: os.symlink(snake_case , snake_case ) except OSError: logger.warning( "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). _lowerCAmelCase : Tuple = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""") if not os.path.isfile(cache_version_file): _lowerCAmelCase : Any = 0 else: with open(cache_version_file) as f: try: _lowerCAmelCase : List[str] = int(f.read()) except ValueError: _lowerCAmelCase : Optional[int] = 0 if cache_version < 1: _lowerCAmelCase : List[str] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( """The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """ """existing cached models. This is a one-time operation, you can interrupt it or run it """ """later by calling `diffusers.utils.hub_utils.move_cache()`.""" ) try: move_cache() except Exception as e: _lowerCAmelCase : Dict = """\n""".join(traceback.format_tb(e.__traceback__)) logger.error( F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """ """file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """ """message and we will do our best to help.""" ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, """w""") as f: f.write("""1""") except Exception: logger.warning( F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """ """the directory exists and can be written to.""" ) def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None )-> str: '''simple docstring''' if variant is not None: UpperCAmelCase__ : int = weights_name.split("." 
) UpperCAmelCase__ : Optional[Any] = splits[:-1] + [variant] + splits[-1:] UpperCAmelCase__ : Optional[int] = ".".join(snake_case ) return weights_name def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , *, snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : str , snake_case : List[str] , snake_case : Dict , snake_case : Any , snake_case : Any , snake_case : Tuple , snake_case : List[str] , snake_case : Any , snake_case : Optional[int]=None , )-> Tuple: '''simple docstring''' UpperCAmelCase__ : List[str] = str(snake_case ) if os.path.isfile(snake_case ): return pretrained_model_name_or_path elif os.path.isdir(snake_case ): if os.path.isfile(os.path.join(snake_case , snake_case ) ): # Load from a PyTorch checkpoint UpperCAmelCase__ : Any = os.path.join(snake_case , snake_case ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(snake_case , snake_case , snake_case ) ): UpperCAmelCase__ : str = os.path.join(snake_case , snake_case , snake_case ) return model_file else: raise EnvironmentError( f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(snake_case ).base_version ) >= version.parse("0.20.0" ) ): try: UpperCAmelCase__ : List[Any] = hf_hub_download( snake_case , filename=_add_variant(snake_case , snake_case ) , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) warnings.warn( f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , snake_case , ) return model_file except: # noqa: E722 warnings.warn( f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(snake_case , snake_case )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(snake_case , snake_case )}\' so that the correct variant file can be added.' , snake_case , ) try: # 2. 
Load model file as usual UpperCAmelCase__ : Dict = hf_hub_download( snake_case , filename=snake_case , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ' "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ' "this model name. Check the model page at " f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' ) except EntryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' ) except HTTPError as err: raise EnvironmentError( f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' ) except ValueError: raise EnvironmentError( f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it' f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a' f' directory containing a file named {weights_name} or' " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ' "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ' f'containing a file named {weights_name}' )
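The variant-splicing helper near the top of this module rewrites weight file names by inserting the variant tag before the final extension (upstream diffusers calls this helper `_add_variant`). A self-contained sketch of that logic, with hypothetical file names:

from typing import Optional


def add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    # same splice as the helper above: the variant tag lands before the final extension
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name


assert add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
assert add_variant("model.safetensors", "ema") == "model.ema.safetensors"
assert add_variant("model.bin") == "model.bin"  # no variant -> name unchanged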
"""simple docstring""" import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class lowerCAmelCase__ ( __magic_name__ ): def __a ( self : List[Any] , snake_case__ : str ): '''simple docstring''' with open(snake_case__ , encoding="utf-8" ) as input_file: UpperCAmelCase__ : List[Any] = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" ) UpperCAmelCase__ : Tuple = input_file.read() UpperCAmelCase__ : Tuple = regexp.search(snake_case__ ) return match def __a ( self : List[str] , snake_case__ : str ): '''simple docstring''' with open(snake_case__ , encoding="utf-8" ) as input_file: UpperCAmelCase__ : Union[str, Any] = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL ) UpperCAmelCase__ : Dict = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` UpperCAmelCase__ : int = regexp.finditer(snake_case__ ) UpperCAmelCase__ : Dict = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = Path("./datasets" ) UpperCAmelCase__ : Any = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(snake_case__ ) ): raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}' ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = Path("./datasets" ) UpperCAmelCase__ : int = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_print_statements(str(snake_case__ ) ): raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.' )
"""simple docstring""" import gc import random import unittest import numpy as np import torch from PIL import Image from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer from diffusers import ( AutoencoderKL, DDIMScheduler, EulerAncestralDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, StableDiffusionInstructPixaPixPipeline, UNetaDConditionModel, ) from diffusers.image_processor import VaeImageProcessor from diffusers.utils import floats_tensor, load_image, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..pipeline_params import ( IMAGE_TO_IMAGE_IMAGE_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS, ) from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin enable_full_determinism() class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =StableDiffusionInstructPixaPixPipeline SCREAMING_SNAKE_CASE_ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''cross_attention_kwargs'''} SCREAMING_SNAKE_CASE_ =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS SCREAMING_SNAKE_CASE_ =IMAGE_TO_IMAGE_IMAGE_PARAMS SCREAMING_SNAKE_CASE_ =IMAGE_TO_IMAGE_IMAGE_PARAMS def __a ( self : Any ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : List[str] = UNetaDConditionModel( block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , ) UpperCAmelCase__ : Dict = PNDMScheduler(skip_prk_steps=snake_case__ ) torch.manual_seed(0 ) UpperCAmelCase__ : Union[str, Any] = AutoencoderKL( block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , ) torch.manual_seed(0 ) UpperCAmelCase__ : Tuple = CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) UpperCAmelCase__ : Dict = CLIPTextModel(snake_case__ ) UpperCAmelCase__ : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) UpperCAmelCase__ : List[Any] = { "unet": unet, "scheduler": scheduler, "vae": vae, "text_encoder": text_encoder, "tokenizer": tokenizer, "safety_checker": None, "feature_extractor": None, } return components def __a ( self : Any , snake_case__ : List[Any] , snake_case__ : Optional[int]=0 ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(snake_case__ ) ).to(snake_case__ ) UpperCAmelCase__ : Any = image.cpu().permute(0 , 2 , 3 , 1 )[0] UpperCAmelCase__ : Any = Image.fromarray(np.uinta(snake_case__ ) ).convert("RGB" ) if str(snake_case__ ).startswith("mps" ): UpperCAmelCase__ : int = torch.manual_seed(snake_case__ ) else: UpperCAmelCase__ : Dict = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) UpperCAmelCase__ : List[str] = { "prompt": "A painting of a squirrel eating a burger", "image": image, "generator": generator, "num_inference_steps": 2, "guidance_scale": 6.0, "image_guidance_scale": 1, "output_type": "numpy", } return inputs def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : int = "cpu" # ensure 
determinism for the device-dependent torch.Generator UpperCAmelCase__ : List[Any] = self.get_dummy_components() UpperCAmelCase__ : Any = StableDiffusionInstructPixaPixPipeline(**snake_case__ ) UpperCAmelCase__ : Optional[int] = sd_pipe.to(snake_case__ ) sd_pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase__ : Optional[Any] = self.get_dummy_inputs(snake_case__ ) UpperCAmelCase__ : str = sd_pipe(**snake_case__ ).images UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) UpperCAmelCase__ : Any = np.array([0.7526, 0.3750, 0.4547, 0.6117, 0.5866, 0.5016, 0.4327, 0.5642, 0.4815] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase__ : List[Any] = self.get_dummy_components() UpperCAmelCase__ : Any = StableDiffusionInstructPixaPixPipeline(**snake_case__ ) UpperCAmelCase__ : str = sd_pipe.to(snake_case__ ) sd_pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase__ : int = self.get_dummy_inputs(snake_case__ ) UpperCAmelCase__ : List[Any] = "french fries" UpperCAmelCase__ : Any = sd_pipe(**snake_case__ , negative_prompt=snake_case__ ) UpperCAmelCase__ : Union[str, Any] = output.images UpperCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) UpperCAmelCase__ : Any = np.array([0.7511, 0.3642, 0.4553, 0.6236, 0.5797, 0.5013, 0.4343, 0.5611, 0.4831] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase__ : List[str] = self.get_dummy_components() UpperCAmelCase__ : Tuple = StableDiffusionInstructPixaPixPipeline(**snake_case__ ) UpperCAmelCase__ : List[str] = sd_pipe.to(snake_case__ ) sd_pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase__ : List[Any] = self.get_dummy_inputs(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = [inputs["prompt"]] * 2 UpperCAmelCase__ : Optional[Any] = np.array(inputs["image"] ).astype(np.floataa ) / 255.0 UpperCAmelCase__ : Union[str, Any] = torch.from_numpy(snake_case__ ).unsqueeze(0 ).to(snake_case__ ) UpperCAmelCase__ : Optional[Any] = image / 2 + 0.5 UpperCAmelCase__ : int = image.permute(0 , 3 , 1 , 2 ) UpperCAmelCase__ : Tuple = image.repeat(2 , 1 , 1 , 1 ) UpperCAmelCase__ : Any = sd_pipe(**snake_case__ ).images UpperCAmelCase__ : Optional[int] = image[-1, -3:, -3:, -1] assert image.shape == (2, 3_2, 3_2, 3) UpperCAmelCase__ : str = np.array([0.5812, 0.5748, 0.5222, 0.5908, 0.5695, 0.7174, 0.6804, 0.5523, 0.5579] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase__ : List[str] = self.get_dummy_components() UpperCAmelCase__ : Optional[Any] = EulerAncestralDiscreteScheduler( beta_start=0.0_0085 , beta_end=0.012 , beta_schedule="scaled_linear" ) UpperCAmelCase__ : Any = StableDiffusionInstructPixaPixPipeline(**snake_case__ ) UpperCAmelCase__ : Union[str, Any] = sd_pipe.to(snake_case__ ) sd_pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase__ : Optional[int] = self.get_dummy_inputs(snake_case__ ) UpperCAmelCase__ : Tuple = sd_pipe(**snake_case__ ).images UpperCAmelCase__ : Dict = image[0, 
-3:, -3:, -1] UpperCAmelCase__ : int = [round(snake_case__ , 4 ) for x in image_slice.flatten().tolist()] print(",".join([str(snake_case__ ) for x in slice] ) ) assert image.shape == (1, 3_2, 3_2, 3) UpperCAmelCase__ : int = np.array([0.7417, 0.3842, 0.4732, 0.5776, 0.5891, 0.5139, 0.4052, 0.5673, 0.4986] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __a ( self : Dict ): '''simple docstring''' super().test_inference_batch_single_identical(expected_max_diff=3e-3 ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.get_dummy_components() UpperCAmelCase__ : Tuple = StableDiffusionInstructPixaPixPipeline(**snake_case__ ) UpperCAmelCase__ : List[str] = VaeImageProcessor(do_resize=snake_case__ , do_normalize=snake_case__ ) UpperCAmelCase__ : Dict = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) UpperCAmelCase__ : List[str] = pipe(**self.get_dummy_inputs_by_type(snake_case__ , input_image_type="pt" ) )[0] UpperCAmelCase__ : Union[str, Any] = components["vae"] UpperCAmelCase__ : int = self.get_dummy_inputs_by_type(snake_case__ , input_image_type="pt" ) for image_param in self.image_latents_params: if image_param in inputs.keys(): UpperCAmelCase__ : Dict = vae.encode(inputs[image_param] ).latent_dist.mode() UpperCAmelCase__ : str = pipe(**snake_case__ )[0] UpperCAmelCase__ : Optional[Any] = np.abs(out - out_latents_inputs ).max() self.assertLess(snake_case__ , 1e-4 , "passing latents as image input generate different result from passing image" ) @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : Optional[Any] ): '''simple docstring''' super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self : Optional[Any] , snake_case__ : int=0 ): '''simple docstring''' UpperCAmelCase__ : str = torch.manual_seed(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = load_image( "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" ) UpperCAmelCase__ : str = { "prompt": "turn him into a cyborg", "image": image, "generator": generator, "num_inference_steps": 3, "guidance_scale": 7.5, "image_guidance_scale": 1.0, "output_type": "numpy", } return inputs def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=snake_case__ ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing() UpperCAmelCase__ : Optional[Any] = self.get_inputs() UpperCAmelCase__ : Tuple = pipe(**snake_case__ ).images UpperCAmelCase__ : List[str] = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) UpperCAmelCase__ : int = np.array([0.5902, 0.6015, 0.6027, 0.5983, 0.6092, 0.6061, 0.5765, 0.5785, 0.5555] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=snake_case__ ) UpperCAmelCase__ : int = LMSDiscreteScheduler.from_config(pipe.scheduler.config ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing() UpperCAmelCase__ : Any = self.get_inputs() UpperCAmelCase__ : List[str] = pipe(**snake_case__ ).images UpperCAmelCase__ : Tuple = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) UpperCAmelCase__ : 
Optional[int] = np.array([0.6578, 0.6817, 0.6972, 0.6761, 0.6856, 0.6916, 0.6428, 0.6516, 0.6301] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=snake_case__ ) UpperCAmelCase__ : int = DDIMScheduler.from_config(pipe.scheduler.config ) pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing() UpperCAmelCase__ : Tuple = self.get_inputs() UpperCAmelCase__ : List[Any] = pipe(**snake_case__ ).images UpperCAmelCase__ : Dict = image[0, -3:, -3:, -1].flatten() assert image.shape == (1, 5_1_2, 5_1_2, 3) UpperCAmelCase__ : Any = np.array([0.3828, 0.3834, 0.3818, 0.3792, 0.3865, 0.3752, 0.3792, 0.3847, 0.3753] ) assert np.abs(expected_slice - image_slice ).max() < 1e-3 def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = 0 def callback_fn(snake_case__ : int , snake_case__ : int , snake_case__ : torch.FloatTensor ) -> None: UpperCAmelCase__ : Optional[int] = True nonlocal number_of_steps number_of_steps += 1 if step == 1: UpperCAmelCase__ : Tuple = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 6_4, 6_4) UpperCAmelCase__ : Any = latents[0, -3:, -3:, -1] UpperCAmelCase__ : str = np.array([-0.2463, -0.4644, -0.9756, 1.5176, 1.4414, 0.7866, 0.9897, 0.8521, 0.7983] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 elif step == 2: UpperCAmelCase__ : Tuple = latents.detach().cpu().numpy() assert latents.shape == (1, 4, 6_4, 6_4) UpperCAmelCase__ : Optional[int] = latents[0, -3:, -3:, -1] UpperCAmelCase__ : Optional[Any] = np.array([-0.2644, -0.4626, -0.9653, 1.5176, 1.4551, 0.7686, 0.9805, 0.8452, 0.8115] ) assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2 UpperCAmelCase__ : Union[str, Any] = False UpperCAmelCase__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=snake_case__ , torch_dtype=torch.floataa ) UpperCAmelCase__ : int = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing() UpperCAmelCase__ : Tuple = self.get_inputs() pipe(**snake_case__ , callback=snake_case__ , callback_steps=1 ) assert callback_fn.has_been_called assert number_of_steps == 3 def __a ( self : Dict ): '''simple docstring''' torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() UpperCAmelCase__ : Any = StableDiffusionInstructPixaPixPipeline.from_pretrained( "timbrooks/instruct-pix2pix" , safety_checker=snake_case__ , torch_dtype=torch.floataa ) UpperCAmelCase__ : Tuple = pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing(1 ) pipe.enable_sequential_cpu_offload() UpperCAmelCase__ : Optional[Any] = self.get_inputs() UpperCAmelCase__ : str = pipe(**snake_case__ ) UpperCAmelCase__ : str = torch.cuda.max_memory_allocated() # make sure that less than 2.2 GB is allocated assert mem_bytes < 2.2 * 1_0**9 def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.get_inputs() # resize to resolution that is divisible by 8 but not 16 or 32 UpperCAmelCase__ : Dict = inputs["image"].resize((5_0_4, 5_0_4) ) UpperCAmelCase__ : Dict = "timbrooks/instruct-pix2pix" UpperCAmelCase__ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained( snake_case__ , safety_checker=snake_case__ , ) 
pipe.to(snake_case__ ) pipe.set_progress_bar_config(disable=snake_case__ ) pipe.enable_attention_slicing() UpperCAmelCase__ : Optional[int] = pipe(**snake_case__ ) UpperCAmelCase__ : Union[str, Any] = output.images[0] UpperCAmelCase__ : Union[str, Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1] assert image.shape == (5_0_4, 5_0_4, 3) UpperCAmelCase__ : Optional[int] = np.array([0.2726, 0.2529, 0.2664, 0.2655, 0.2641, 0.2642, 0.2591, 0.2649, 0.2590] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
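A compact end-to-end sketch of the pipeline these tests drive. The checkpoint and image URL are taken from the tests; the prompt, step count, and output path are illustrative, and upstream diffusers spells the class StableDiffusionInstructPix2PixPipeline:

import torch
from diffusers import StableDiffusionInstructPix2PixPipeline
from diffusers.utils import load_image

pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(
    "timbrooks/instruct-pix2pix", torch_dtype=torch.float16, safety_checker=None
)
pipe = pipe.to("cuda")

image = load_image(
    "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg"
)
edited = pipe(
    "turn him into a cyborg",
    image=image,
    num_inference_steps=20,
    guidance_scale=7.5,
    image_guidance_scale=1.0,
).images[0]
edited.save("cyborg.png")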
"""simple docstring""" import numpy as np import datasets _lowerCAmelCase : Optional[int] = """ Compute the Mahalanobis Distance Mahalonobis distance is the distance between a point and a distribution. And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance. It was introduced by Prof. P. C. Mahalanobis in 1936 and has been used in various statistical applications ever since [source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/] """ _lowerCAmelCase : Tuple = """\ @article{de2000mahalanobis, title={The mahalanobis distance}, author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L}, journal={Chemometrics and intelligent laboratory systems}, volume={50}, number={1}, pages={1--18}, year={2000}, publisher={Elsevier} } """ _lowerCAmelCase : Optional[int] = """ Args: X: List of datapoints to be compared with the `reference_distribution`. reference_distribution: List of datapoints from the reference distribution we want to compare to. Returns: mahalanobis: The Mahalonobis distance for each datapoint in `X`. Examples: >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\") >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]]) >>> print(results) {'mahalanobis': array([0.5])} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): def __a ( self : Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ), } ) , ) def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any ): '''simple docstring''' # convert to numpy arrays UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError("Expected `X` to be a 2D vector" ) if len(reference_distribution.shape ) != 2: raise ValueError("Expected `reference_distribution` to be a 2D vector" ) if reference_distribution.shape[0] < 2: raise ValueError( "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" ) # Get mahalanobis distance for each prediction UpperCAmelCase__ : Optional[Any] = X - np.mean(snake_case__ ) UpperCAmelCase__ : Tuple = np.cov(reference_distribution.T ) try: UpperCAmelCase__ : str = np.linalg.inv(snake_case__ ) except np.linalg.LinAlgError: UpperCAmelCase__ : Optional[Any] = np.linalg.pinv(snake_case__ ) UpperCAmelCase__ : List[Any] = np.dot(snake_case__ , snake_case__ ) UpperCAmelCase__ : Tuple = np.dot(snake_case__ , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
"""simple docstring""" import qiskit def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int )-> qiskit.result.counts.Counts: '''simple docstring''' UpperCAmelCase__ : str = qiskit.Aer.get_backend("aer_simulator" ) UpperCAmelCase__ : Optional[int] = qiskit.QuantumCircuit(4 , 2 ) # encode inputs in qubits 0 and 1 if bita == 1: qc_ha.x(0 ) if bita == 1: qc_ha.x(1 ) qc_ha.barrier() # use cnots to write XOR of the inputs on qubit2 qc_ha.cx(0 , 2 ) qc_ha.cx(1 , 2 ) # use ccx / toffoli gate to write AND of the inputs on qubit3 qc_ha.ccx(0 , 1 , 3 ) qc_ha.barrier() # extract outputs qc_ha.measure(2 , 0 ) # extract XOR value qc_ha.measure(3 , 1 ) # extract AND value # Execute the circuit on the qasm simulator UpperCAmelCase__ : Optional[int] = qiskit.execute(snake_case , snake_case , shots=1000 ) # Return the histogram data of the results of the experiment return job.result().get_counts(snake_case ) if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = half_adder(1, 1) print(F"""Half Adder Output Qubit Counts: {counts}""")
"""simple docstring""" import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =IFPipeline SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_BATCH_PARAMS SCREAMING_SNAKE_CASE_ =PipelineTesterMixin.required_optional_params - {'''latents'''} def __a ( self : Dict ): '''simple docstring''' return self._get_dummy_components() def __a ( self : Any , snake_case__ : Dict , snake_case__ : Optional[Any]=0 ): '''simple docstring''' if str(snake_case__ ).startswith("mps" ): UpperCAmelCase__ : str = torch.manual_seed(snake_case__ ) else: UpperCAmelCase__ : Optional[int] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) UpperCAmelCase__ : Tuple = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def __a ( self : Tuple ): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def __a ( self : Tuple ): '''simple docstring''' # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def __a ( self : Dict ): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def __a ( self : int ): '''simple docstring''' self._test_save_load_local() def __a ( self : Any ): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1e-2 , ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __a ( self : Optional[Any] ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : str ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self : Tuple ): '''simple docstring''' # if UpperCAmelCase__ : Any = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa ) UpperCAmelCase__ : Union[str, Any] = IFSuperResolutionPipeline.from_pretrained( "DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=snake_case__ , tokenizer=snake_case__ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("cuda" ) UpperCAmelCase__ , UpperCAmelCase__ : Any = pipe_a.encode_prompt("anime turtle" , device="cuda" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() UpperCAmelCase__ : Tuple = None UpperCAmelCase__ : List[Any] = None pipe_a.enable_model_cpu_offload() 
pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img UpperCAmelCase__ : List[str] = IFImgaImgPipeline(**pipe_a.components ) UpperCAmelCase__ : List[str] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting UpperCAmelCase__ : List[str] = IFInpaintingPipeline(**pipe_a.components ) UpperCAmelCase__ : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def __a ( self : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[Any] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Dict = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : List[Any] = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_3 * 1_0**9 UpperCAmelCase__ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : str = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Union[str, Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def __a ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Tuple = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : str = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : 
Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 UpperCAmelCase__ : List[str] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Dict = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Optional[Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[int] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(snake_case__ ) UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : int = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : int = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : Union[str, Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 UpperCAmelCase__ : int = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Tuple = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Any: '''simple docstring''' 
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
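# Usage sketch (not from the original test file): how the tests above pair this
# helper with torch.cuda.max_memory_allocated() to enforce a byte budget.
# Assumes a CUDA device is available; the workload here is a toy matmul.
import torch

if torch.cuda.is_available():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    _ = torch.randn(1024, 1024, device="cuda") @ torch.randn(1024, 1024, device="cuda")
    mem_bytes = torch.cuda.max_memory_allocated()
    assert mem_bytes < 13 * 10**9  # same style of bound the tests assert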
298
1
"""simple docstring""" import unittest from transformers import TrOCRConfig from transformers.testing_utils import is_torch_available, require_torch, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM @require_torch class lowerCAmelCase__ : def __init__( self : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : Any=9_9 , snake_case__ : Dict=1_3 , snake_case__ : Optional[Any]=1_6 , snake_case__ : List[str]=7 , snake_case__ : Dict=True , snake_case__ : Union[str, Any]=True , snake_case__ : List[str]=True , snake_case__ : Tuple=False , snake_case__ : Optional[Any]=True , snake_case__ : Optional[Any]=2 , snake_case__ : Tuple=3_2 , snake_case__ : int=4 , snake_case__ : Any=4 , snake_case__ : Optional[int]=3_0 , snake_case__ : Tuple=0 , snake_case__ : Optional[Any]=1 , snake_case__ : Tuple=2 , snake_case__ : List[str]=None , ): '''simple docstring''' UpperCAmelCase__ : List[Any] = parent UpperCAmelCase__ : Tuple = batch_size UpperCAmelCase__ : Dict = decoder_seq_length # For common tests UpperCAmelCase__ : Dict = self.decoder_seq_length UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : Tuple = use_attention_mask UpperCAmelCase__ : str = use_labels UpperCAmelCase__ : List[Any] = vocab_size UpperCAmelCase__ : Dict = d_model UpperCAmelCase__ : Dict = d_model UpperCAmelCase__ : Union[str, Any] = decoder_layers UpperCAmelCase__ : Union[str, Any] = decoder_layers UpperCAmelCase__ : Tuple = decoder_ffn_dim UpperCAmelCase__ : Dict = decoder_attention_heads UpperCAmelCase__ : Optional[Any] = decoder_attention_heads UpperCAmelCase__ : Dict = eos_token_id UpperCAmelCase__ : Optional[int] = bos_token_id UpperCAmelCase__ : Tuple = pad_token_id UpperCAmelCase__ : int = decoder_start_token_id UpperCAmelCase__ : str = use_cache UpperCAmelCase__ : Optional[Any] = max_position_embeddings UpperCAmelCase__ : int = None UpperCAmelCase__ : Any = decoder_seq_length UpperCAmelCase__ : Dict = 2 UpperCAmelCase__ : Tuple = 1 def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) UpperCAmelCase__ : Tuple = None if self.use_attention_mask: UpperCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 ) UpperCAmelCase__ : Optional[Any] = None if self.use_labels: UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size ) UpperCAmelCase__ : List[str] = TrOCRConfig( vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , ) return (config, input_ids, attention_mask, lm_labels) def __a ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[Any] , ): '''simple docstring''' UpperCAmelCase__ : Dict = True UpperCAmelCase__ : Optional[int] = TrOCRDecoder(config=snake_case__ ).to(snake_case__ ).eval() 
UpperCAmelCase__ : str = input_ids[:2] input_ids[input_ids == 0] += 1 # first forward pass UpperCAmelCase__ : List[Any] = model(snake_case__ , use_cache=snake_case__ ) UpperCAmelCase__ : Tuple = model(snake_case__ ) UpperCAmelCase__ : List[str] = model(snake_case__ , use_cache=snake_case__ ) self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) ) self.parent.assertTrue(len(snake_case__ ) == len(snake_case__ ) + 1 ) UpperCAmelCase__ : Optional[int] = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids UpperCAmelCase__ : List[Any] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1 # append to next input_ids and UpperCAmelCase__ : Any = torch.cat([input_ids, next_tokens] , dim=-1 ) UpperCAmelCase__ : Any = model(snake_case__ )["last_hidden_state"] UpperCAmelCase__ : List[Any] = model(snake_case__ , past_key_values=snake_case__ )["last_hidden_state"] # select random slice UpperCAmelCase__ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item() UpperCAmelCase__ : Tuple = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() UpperCAmelCase__ : List[Any] = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 ) def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Any = config_and_inputs UpperCAmelCase__ : Optional[Any] = {"input_ids": input_ids, "attention_mask": attention_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =(TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else () SCREAMING_SNAKE_CASE_ =(TrOCRForCausalLM,) if is_torch_available() else () SCREAMING_SNAKE_CASE_ ={'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {} SCREAMING_SNAKE_CASE_ =True SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = TrOCRStandaloneDecoderModelTester(self , is_training=snake_case__ ) UpperCAmelCase__ : Dict = ConfigTester(self , config_class=snake_case__ ) def __a ( self : Any ): '''simple docstring''' pass def __a ( self : int ): '''simple docstring''' pass def __a ( self : List[str] ): '''simple docstring''' pass def __a ( self : Any ): '''simple docstring''' self.config_tester.run_common_tests() def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' return @unittest.skip("The model doesn't support left padding" ) # and it's not used enough to be worth fixing :) def __a ( self : Union[str, Any] ): '''simple docstring''' pass
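# Illustrative sketch (not part of the test file above): the cache-consistency
# property being checked, on a tiny randomly initialized TrOCR decoder:
# decoding the last token with cached key/values must match a full forward
# pass. Config sizes are arbitrary toy values.
import torch
from transformers import TrOCRConfig
from transformers.models.trocr.modeling_trocr import TrOCRDecoder

config = TrOCRConfig(
    vocab_size=99, d_model=32, decoder_layers=2, decoder_attention_heads=4,
    decoder_ffn_dim=37, max_position_embeddings=30,
)
decoder = TrOCRDecoder(config=config).eval()
input_ids = torch.randint(1, config.vocab_size, (1, 7))

past = decoder(input_ids[:, :-1], use_cache=True)["past_key_values"]
from_cache = decoder(input_ids[:, -1:], past_key_values=past)["last_hidden_state"]
full = decoder(input_ids)["last_hidden_state"]
assert torch.allclose(full[:, -1], from_cache[:, 0], atol=1e-3)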
298
"""simple docstring""" import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { """vocab_file""": """vocab.txt""", """merges_file""": """bpe.codes""", } _lowerCAmelCase : List[Any] = { """vocab_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""", }, """merges_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""", }, } _lowerCAmelCase : int = { """vinai/phobert-base""": 256, """vinai/phobert-large""": 256, } def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = set() UpperCAmelCase__ : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCAmelCase__ : Dict = char UpperCAmelCase__ : Tuple = set(snake_case ) return pairs class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : List[Any] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Tuple="<s>" , snake_case__ : List[Any]="</s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : Union[str, Any]="<s>" , snake_case__ : Any="<unk>" , snake_case__ : int="<pad>" , snake_case__ : List[str]="<mask>" , **snake_case__ : Optional[int] , ): '''simple docstring''' super().__init__( bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , ) UpperCAmelCase__ : Dict = vocab_file UpperCAmelCase__ : Tuple = merges_file UpperCAmelCase__ : List[Any] = {} UpperCAmelCase__ : Dict = 0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : Dict = 2 UpperCAmelCase__ : Dict = 3 self.add_from_file(snake_case__ ) UpperCAmelCase__ : Optional[Any] = {v: k for k, v in self.encoder.items()} with open(snake_case__ , encoding="utf-8" ) as merges_handle: UpperCAmelCase__ : Tuple = merges_handle.read().split("\n" )[:-1] UpperCAmelCase__ : Optional[Any] = [tuple(merge.split()[:-1] ) for merge in merges] UpperCAmelCase__ : List[Any] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase__ : Dict = {} def __a ( self : int , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] UpperCAmelCase__ : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __a ( self : List[str] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) if token_ids_a is None: return [1] + ([0] * len(snake_case__ )) + [1] return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1] def __a ( self : Union[str, Any] , 
snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase__ : Tuple = [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __a ( self : List[str] ): '''simple docstring''' return len(self.encoder ) def __a ( self : Any ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def __a ( self : Dict , snake_case__ : Tuple ): '''simple docstring''' if token in self.cache: return self.cache[token] UpperCAmelCase__ : Optional[Any] = tuple(snake_case__ ) UpperCAmelCase__ : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) UpperCAmelCase__ : Any = get_pairs(snake_case__ ) if not pairs: return token while True: UpperCAmelCase__ : List[Any] = min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break UpperCAmelCase__ , UpperCAmelCase__ : Tuple = bigram UpperCAmelCase__ : Optional[Any] = [] UpperCAmelCase__ : Tuple = 0 while i < len(snake_case__ ): try: UpperCAmelCase__ : Union[str, Any] = word.index(snake_case__ , snake_case__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCAmelCase__ : Dict = j if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCAmelCase__ : Dict = tuple(snake_case__ ) UpperCAmelCase__ : List[Any] = new_word if len(snake_case__ ) == 1: break else: UpperCAmelCase__ : Dict = get_pairs(snake_case__ ) UpperCAmelCase__ : List[Any] = "@@ ".join(snake_case__ ) UpperCAmelCase__ : Optional[int] = word[:-4] UpperCAmelCase__ : Union[str, Any] = word return word def __a ( self : List[Any] , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : int = re.findall(R"\S+\n?" 
, snake_case__ ) for token in words: split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) ) return split_tokens def __a ( self : Dict , snake_case__ : List[str] ): '''simple docstring''' return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) ) def __a ( self : List[Any] , snake_case__ : Any ): '''simple docstring''' return self.decoder.get(snake_case__ , self.unk_token ) def __a ( self : str , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = " ".join(snake_case__ ).replace("@@ " , "" ).strip() return out_string def __a ( self : Any , snake_case__ : str , snake_case__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(snake_case__ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return UpperCAmelCase__ : Tuple = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : str = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) if os.path.abspath(self.merges_file ) != os.path.abspath(snake_case__ ): copyfile(self.merges_file , snake_case__ ) return out_vocab_file, out_merge_file def __a ( self : List[Any] , snake_case__ : Union[str, Any] ): '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): try: with open(snake_case__ , "r" , encoding="utf-8" ) as fd: self.add_from_file(snake_case__ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' ) return UpperCAmelCase__ : Dict = f.readlines() for lineTmp in lines: UpperCAmelCase__ : Optional[int] = lineTmp.strip() UpperCAmelCase__ : Tuple = line.rfind(" " ) if idx == -1: raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" ) UpperCAmelCase__ : Any = line[:idx] UpperCAmelCase__ : str = len(self.encoder )
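# Toy walk-through (added for illustration): what get_pairs() above computes
# for a BPE word, using the same "</w>" end-of-word convention.
word = ("l", "o", "w", "er</w>")
pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
print(pairs)  # {('l', 'o'), ('o', 'w'), ('w', 'er</w>')}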
298
1
"""simple docstring""" import argparse from torch import nn # transformers_old should correspond to branch `save_old_prophetnet_model_structure` here # original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively from transformers_old.modeling_prophetnet import ( ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld, ) from transformers_old.modeling_xlm_prophetnet import ( XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld, ) from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging _lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) logging.set_verbosity_info() def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str )-> int: '''simple docstring''' if "xprophetnet" in prophetnet_checkpoint_path: UpperCAmelCase__ : Any = XLMProphetNetForConditionalGenerationOld.from_pretrained(snake_case ) UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = XLMProphetNetForConditionalGeneration.from_pretrained( snake_case , output_loading_info=snake_case ) else: UpperCAmelCase__ : Union[str, Any] = ProphetNetForConditionalGenerationOld.from_pretrained(snake_case ) UpperCAmelCase__ , UpperCAmelCase__ : List[str] = ProphetNetForConditionalGeneration.from_pretrained( snake_case , output_loading_info=snake_case ) UpperCAmelCase__ : Dict = ["key_proj", "value_proj", "query_proj"] UpperCAmelCase__ : int = { "self_attn": "ngram_self_attn", "cross_attn": "encoder_attn", "cross_attn_layer_norm": "encoder_attn_layer_norm", "feed_forward_layer_norm": "final_layer_norm", "feed_forward": "", "intermediate": "fc1", "output": "fc2", "key_proj": "k_proj", "query_proj": "q_proj", "value_proj": "v_proj", "word_embeddings": "embed_tokens", "embeddings_layer_norm": "emb_layer_norm", "relative_pos_embeddings": "relative_linear", "ngram_embeddings": "ngram_input_embed", "position_embeddings": "embed_positions", } for key in loading_info["missing_keys"]: UpperCAmelCase__ : Optional[Any] = key.split("." ) if attributes[0] == "lm_head": UpperCAmelCase__ : Optional[Any] = prophet UpperCAmelCase__ : List[str] = prophet_old else: UpperCAmelCase__ : int = prophet.prophetnet UpperCAmelCase__ : Tuple = prophet_old.model UpperCAmelCase__ : str = False for attribute in attributes: if attribute in mapping: UpperCAmelCase__ : List[Any] = mapping[attribute] if not hasattr(snake_case , snake_case ) and len(snake_case ) > 0: UpperCAmelCase__ : List[str] = attribute elif hasattr(snake_case , snake_case ): UpperCAmelCase__ : Tuple = attribute if attribute == "weight": assert old_model.weight.shape == model.weight.shape, "Shapes have to match!" UpperCAmelCase__ : Optional[int] = old_model.weight logger.info(f'{attribute} is initialized.' ) UpperCAmelCase__ : List[str] = True break elif attribute == "bias": assert old_model.bias.shape == model.bias.shape, "Shapes have to match!" 
UpperCAmelCase__ : Tuple = old_model.bias logger.info(f'{attribute} is initialized.' ) UpperCAmelCase__ : Any = True break elif attribute in special_keys and hasattr(snake_case , "in_proj_weight" ): UpperCAmelCase__ : Optional[int] = old_model.in_proj_weight.shape[0] // 3 UpperCAmelCase__ : Optional[int] = getattr(snake_case , snake_case ) assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match" assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match" if attribute == "query_proj": UpperCAmelCase__ : Dict = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] ) UpperCAmelCase__ : Dict = nn.Parameter(old_model.in_proj_bias[:embed_dim] ) elif attribute == "key_proj": UpperCAmelCase__ : str = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] ) UpperCAmelCase__ : List[Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] ) elif attribute == "value_proj": UpperCAmelCase__ : List[str] = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] ) UpperCAmelCase__ : List[str] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] ) UpperCAmelCase__ : int = True break elif attribute == "position_embeddings": assert ( model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1] ), "Hidden size has to match" assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings." UpperCAmelCase__ : Any = nn.Parameter(old_model.embed_positions.weight[:512, :] ) UpperCAmelCase__ : List[Any] = True break if attribute.isdigit(): UpperCAmelCase__ : str = model[int(snake_case )] UpperCAmelCase__ : Optional[int] = old_model[int(snake_case )] else: UpperCAmelCase__ : Tuple = getattr(snake_case , snake_case ) if old_attribute == "": UpperCAmelCase__ : str = old_model else: if not hasattr(snake_case , snake_case ): raise ValueError(f'{old_model} does not have {old_attribute}' ) UpperCAmelCase__ : Optional[int] = getattr(snake_case , snake_case ) if not is_key_init: raise ValueError(f'{key} was not correctly initialized!' ) print(f'Saving model to {pytorch_dump_folder_path}' ) prophet.save_pretrained(snake_case ) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( """--prophetnet_checkpoint_path""", default=None, type=str, required=True, help="""Path to the official PyTorch dump.""" ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) args = parser.parse_args() convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
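# Minimal sketch (not from the conversion script): the q/k/v split performed
# above. Legacy checkpoints store one fused in_proj_weight of shape
# (3 * embed_dim, embed_dim); the converter slices it into separate
# query/key/value projections. Toy sizes, random values.
import torch
from torch import nn

embed_dim = 8
in_proj_weight = torch.randn(3 * embed_dim, embed_dim)
in_proj_bias = torch.randn(3 * embed_dim)

q_proj = nn.Linear(embed_dim, embed_dim)
q_proj.weight = nn.Parameter(in_proj_weight[:embed_dim, :])
q_proj.bias = nn.Parameter(in_proj_bias[:embed_dim])
k_weight = nn.Parameter(in_proj_weight[embed_dim : 2 * embed_dim, :])
v_weight = nn.Parameter(in_proj_weight[2 * embed_dim :, :])
assert q_proj.weight.shape == (embed_dim, embed_dim)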
298
"""simple docstring""" # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class lowerCAmelCase__ : SCREAMING_SNAKE_CASE_ =42 # setable values SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =None @classmethod def __a ( cls : Optional[int] , snake_case__ : CommonSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray ): '''simple docstring''' return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ ) @dataclass class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =42 class lowerCAmelCase__ ( __magic_name__ , __magic_name__ ): SCREAMING_SNAKE_CASE_ =[e.name for e in FlaxKarrasDiffusionSchedulers] SCREAMING_SNAKE_CASE_ =42 @property def __a ( self : Union[str, Any] ): '''simple docstring''' return True @register_to_config def __init__( self : Tuple , snake_case__ : int = 1_0_0_0 , snake_case__ : float = 0.0001 , snake_case__ : float = 0.02 , snake_case__ : str = "linear" , snake_case__ : Optional[jnp.ndarray] = None , snake_case__ : str = "fixed_small" , snake_case__ : bool = True , snake_case__ : str = "epsilon" , snake_case__ : jnp.dtype = jnp.floataa , ): '''simple docstring''' UpperCAmelCase__ : Tuple = dtype def __a ( self : Any , snake_case__ : Optional[CommonSchedulerState] = None ): '''simple docstring''' if common is None: UpperCAmelCase__ : Any = CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype ) UpperCAmelCase__ : Optional[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1] return DDPMSchedulerState.create( common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , ) def __a ( self : int , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : Optional[int] = None ): '''simple docstring''' return sample def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Tuple = () ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 UpperCAmelCase__ : Tuple = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=snake_case__ , timesteps=snake_case__ , ) def __a ( self : List[str] , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Any=None , snake_case__ : Union[str, Any]=None ): '''simple docstring''' UpperCAmelCase__ : int = state.common.alphas_cumprod[t] UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample UpperCAmelCase__ : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: UpperCAmelCase__ : Union[str, 
Any] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": UpperCAmelCase__ : int = jnp.clip(snake_case__ , a_min=1e-20 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": UpperCAmelCase__ : Union[str, Any] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) ) elif variance_type == "fixed_large": UpperCAmelCase__ : List[Any] = state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] ) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": UpperCAmelCase__ : List[str] = variance UpperCAmelCase__ : Optional[Any] = state.common.betas[t] UpperCAmelCase__ : Any = (predicted_variance + 1) / 2 UpperCAmelCase__ : Dict = frac * max_log + (1 - frac) * min_log return variance def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : int , snake_case__ : jnp.ndarray , snake_case__ : Optional[jax.random.KeyArray] = None , snake_case__ : bool = True , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = timestep if key is None: UpperCAmelCase__ : Optional[int] = jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 ) else: UpperCAmelCase__ : int = None # 1. compute alphas, betas UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t] UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) UpperCAmelCase__ : List[str] = 1 - alpha_prod_t UpperCAmelCase__ : List[str] = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": UpperCAmelCase__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": UpperCAmelCase__ : List[Any] = model_output elif self.config.prediction_type == "v_prediction": UpperCAmelCase__ : int = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` ' " for the FlaxDDPMScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: UpperCAmelCase__ : Optional[Any] = jnp.clip(snake_case__ , -1 , 1 ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t UpperCAmelCase__ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise def random_variance(): UpperCAmelCase__ : List[str] = jax.random.split(snake_case__ , num=1 ) UpperCAmelCase__ : List[str] = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype ) return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) ) UpperCAmelCase__ : Optional[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ ) def __a ( self : List[Any] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ): '''simple docstring''' return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ ) def __a ( self : Optional[int] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ): '''simple docstring''' return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ ) def __len__( self : Union[str, Any] ): '''simple docstring''' return self.config.num_train_timesteps
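# Worked sketch (added for illustration): the posterior-mean coefficients from
# formula (7) of https://arxiv.org/pdf/2006.11239.pdf, as computed in step()
# above, on a toy 10-step linear beta schedule.
import jax.numpy as jnp

betas = jnp.linspace(1e-4, 2e-2, 10)
alphas = 1.0 - betas
alphas_cumprod = jnp.cumprod(alphas)

t = 5
alpha_prod_t, alpha_prod_t_prev = alphas_cumprod[t], alphas_cumprod[t - 1]
beta_prod_t, beta_prod_t_prev = 1 - alpha_prod_t, 1 - alpha_prod_t_prev

pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * betas[t]) / beta_prod_t
current_sample_coeff = alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# The predicted previous sample is then
# mu_t = pred_original_sample_coeff * x0_pred + current_sample_coeff * x_t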
298
1
"""simple docstring""" from pathlib import PurePosixPath from typing import Optional import fsspec from fsspec import AbstractFileSystem from huggingface_hub.hf_api import DatasetInfo from ..utils.file_utils import get_authentication_headers_for_url from ..utils.hub import hf_hub_url class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''''' SCREAMING_SNAKE_CASE_ ='''hf-legacy''' # "hf://"" is reserved for hffs def __init__( self : Any , snake_case__ : Optional[DatasetInfo] = None , snake_case__ : Optional[str] = None , **snake_case__ : Optional[int] , ): '''simple docstring''' super().__init__(self , **snake_case__ ) UpperCAmelCase__ : Optional[Any] = repo_info UpperCAmelCase__ : Optional[Any] = token UpperCAmelCase__ : Union[str, Any] = None def __a ( self : Optional[Any] ): '''simple docstring''' if self.dir_cache is None: UpperCAmelCase__ : Any = {} for hf_file in self.repo_info.siblings: # TODO(QL): add sizes UpperCAmelCase__ : str = { "name": hf_file.rfilename, "size": None, "type": "file", } self.dir_cache.update( { str(snake_case__ ): {"name": str(snake_case__ ), "size": None, "type": "directory"} for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1] } ) def __a ( self : int , snake_case__ : str , snake_case__ : str = "rb" , **snake_case__ : Union[str, Any] , ): '''simple docstring''' if not isinstance(self.repo_info , snake_case__ ): raise NotImplementedError(f'Open is only implemented for dataset repositories, but got {self.repo_info}' ) UpperCAmelCase__ : str = hf_hub_url(self.repo_info.id , snake_case__ , revision=self.repo_info.sha ) return fsspec.open( snake_case__ , mode=snake_case__ , headers=get_authentication_headers_for_url(snake_case__ , use_auth_token=self.token ) , client_kwargs={"trust_env": True} , ).open() def __a ( self : Optional[Any] , snake_case__ : Tuple , **snake_case__ : Optional[Any] ): '''simple docstring''' self._get_dirs() UpperCAmelCase__ : Tuple = self._strip_protocol(snake_case__ ) if path in self.dir_cache: return self.dir_cache[path] else: raise FileNotFoundError(snake_case__ ) def __a ( self : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Tuple=False , **snake_case__ : int ): '''simple docstring''' self._get_dirs() UpperCAmelCase__ : List[str] = PurePosixPath(path.strip("/" ) ) UpperCAmelCase__ : Optional[Any] = {} for p, f in self.dir_cache.items(): UpperCAmelCase__ : int = PurePosixPath(p.strip("/" ) ) UpperCAmelCase__ : Dict = p.parent if root == path: UpperCAmelCase__ : str = f UpperCAmelCase__ : List[str] = list(paths.values() ) if detail: return out else: return sorted(f["name"] for f in out )
298
"""simple docstring""" import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class lowerCAmelCase__ : def __init__( self : str , snake_case__ : Optional[Any] , snake_case__ : List[Any]=1_3 , snake_case__ : str=7 , snake_case__ : Optional[int]=6 , snake_case__ : Union[str, Any]=1_7 , snake_case__ : Optional[Any]=2_3 , snake_case__ : int=1_1 , snake_case__ : Dict=True , ): '''simple docstring''' UpperCAmelCase__ : str = parent UpperCAmelCase__ : Tuple = batch_size UpperCAmelCase__ : Dict = seq_length UpperCAmelCase__ : Union[str, Any] = act_dim UpperCAmelCase__ : Dict = state_dim UpperCAmelCase__ : Optional[Any] = hidden_size UpperCAmelCase__ : List[str] = max_length UpperCAmelCase__ : int = is_training def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) UpperCAmelCase__ : List[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) UpperCAmelCase__ : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase__ : int = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 ) UpperCAmelCase__ : Optional[int] = random_attention_mask((self.batch_size, self.seq_length) ) UpperCAmelCase__ : Optional[int] = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def __a ( self : int ): '''simple docstring''' return DecisionTransformerConfig( batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , ) def __a ( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Optional[int] , ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) self.parent.assertEqual(result.state_preds.shape , states.shape ) self.parent.assertEqual(result.action_preds.shape , actions.shape ) self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase__ : 
Optional[int] = { "states": states, "actions": actions, "rewards": rewards, "returns_to_go": returns_to_go, "timesteps": timesteps, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =(DecisionTransformerModel,) if is_torch_available() else () SCREAMING_SNAKE_CASE_ =() SCREAMING_SNAKE_CASE_ ={'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids SCREAMING_SNAKE_CASE_ =False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = DecisionTransformerModelTester(self ) UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 ) def __a ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) @slow def __a ( self : List[str] ): '''simple docstring''' for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Tuple = DecisionTransformerModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Dict = model_class(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : Tuple = [*signature.parameters.keys()] UpperCAmelCase__ : str = [ "states", "actions", "rewards", "returns_to_go", "timesteps", "attention_mask", ] self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 2 # number of steps of autoregressive prediction we will perform UpperCAmelCase__ : Tuple = 1_0 # defined by the RL environment, may be normalized UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" ) UpperCAmelCase__ : Any = model.to(snake_case__ ) UpperCAmelCase__ : Optional[int] = model.config torch.manual_seed(0 ) UpperCAmelCase__ : Optional[int] = torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ) # env.reset() UpperCAmelCase__ : Optional[Any] = torch.tensor( [[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=snake_case__ ) UpperCAmelCase__ : List[str] = torch.tensor(snake_case__ , device=snake_case__ , dtype=torch.floataa ).reshape(1 , 1 , 1 ) UpperCAmelCase__ : Union[str, Any] = state UpperCAmelCase__ : Dict = torch.zeros(1 , 0 , config.act_dim , device=snake_case__ , dtype=torch.floataa ) UpperCAmelCase__ : Any = torch.zeros(1 , 0 , 
device=snake_case__ , dtype=torch.floataa ) UpperCAmelCase__ : Optional[int] = torch.tensor(0 , device=snake_case__ , dtype=torch.long ).reshape(1 , 1 ) for step in range(snake_case__ ): UpperCAmelCase__ : List[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=snake_case__ )] , dim=1 ) UpperCAmelCase__ : Optional[int] = torch.cat([rewards, torch.zeros(1 , 1 , device=snake_case__ )] , dim=1 ) UpperCAmelCase__ : Dict = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device ) with torch.no_grad(): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = model( states=snake_case__ , actions=snake_case__ , rewards=snake_case__ , returns_to_go=snake_case__ , timesteps=snake_case__ , attention_mask=snake_case__ , return_dict=snake_case__ , ) self.assertEqual(action_pred.shape , actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = ( # env.step(action) torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ), 1.0, False, {}, ) UpperCAmelCase__ : Union[str, Any] = action_pred[0, -1] UpperCAmelCase__ : int = torch.cat([states, state] , dim=1 ) UpperCAmelCase__ : Dict = returns_to_go[0, -1] - reward UpperCAmelCase__ : Optional[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 ) UpperCAmelCase__ : Tuple = torch.cat( [timesteps, torch.ones((1, 1) , device=snake_case__ , dtype=torch.long ) * (step + 1)] , dim=1 )
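# Minimal sketch (added for illustration): the rollout bookkeeping used by the
# integration test above. Each step appends one timestep to the
# (batch, seq, dim) histories before the model is called; toy dimensions, no
# real environment.
import torch

state_dim, act_dim = 3, 2
states = torch.randn(1, 1, state_dim)
actions = torch.zeros(1, 0, act_dim)
rewards = torch.zeros(1, 0)
for step in range(2):
    actions = torch.cat([actions, torch.zeros(1, 1, act_dim)], dim=1)
    rewards = torch.cat([rewards, torch.zeros(1, 1)], dim=1)
    states = torch.cat([states, torch.randn(1, 1, state_dim)], dim=1)
assert states.shape == (1, 3, state_dim) and actions.shape == (1, 2, act_dim)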
298
1
"""simple docstring""" import json import os import unittest from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =BioGptTokenizer SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase__ : Union[str, Any] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] UpperCAmelCase__ : Union[str, Any] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase__ : Optional[int] = ["l o 123", "lo w 1456", "e r</w> 1789", ""] UpperCAmelCase__ : Optional[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" ) as fp: fp.write(json.dumps(snake_case__ ) ) with open(self.merges_file , "w" ) as fp: fp.write("\n".join(snake_case__ ) ) def __a ( self : Dict , snake_case__ : Any ): '''simple docstring''' UpperCAmelCase__ : Tuple = "lower newer" UpperCAmelCase__ : List[Any] = "lower newer" return input_text, output_text def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = BioGptTokenizer(self.vocab_file , self.merges_file ) UpperCAmelCase__ : str = "lower" UpperCAmelCase__ : Tuple = ["low", "er</w>"] UpperCAmelCase__ : Any = tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) UpperCAmelCase__ : str = tokens + ["<unk>"] UpperCAmelCase__ : Tuple = [1_4, 1_5, 2_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ ) @slow def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = BioGptTokenizer.from_pretrained("microsoft/biogpt" ) UpperCAmelCase__ : Dict = tokenizer.encode("sequence builders" , add_special_tokens=snake_case__ ) UpperCAmelCase__ : int = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case__ ) UpperCAmelCase__ : str = tokenizer.build_inputs_with_special_tokens(snake_case__ ) UpperCAmelCase__ : List[str] = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ ) self.assertTrue(encoded_sentence == [2] + text ) self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
298
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _lowerCAmelCase : Tuple = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = ["""MLukeTokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys _lowerCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
298
1
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> int: '''simple docstring''' if n == 1 or not isinstance(snake_case , snake_case ): return 0 elif n == 2: return 1 else: UpperCAmelCase__ : Any = [0, 1] for i in range(2 , n + 1 ): sequence.append(sequence[i - 1] + sequence[i - 2] ) return sequence[n] def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> int: '''simple docstring''' UpperCAmelCase__ : int = 0 UpperCAmelCase__ : Any = 2 while digits < n: index += 1 UpperCAmelCase__ : int = len(str(fibonacci(snake_case ) ) ) return index def SCREAMING_SNAKE_CASE__ ( snake_case : int = 1000 )-> int: '''simple docstring''' return fibonacci_digits_index(snake_case ) if __name__ == "__main__": print(solution(int(str(input()).strip())))
298
"""simple docstring""" import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger _lowerCAmelCase : Optional[int] = get_logger(__name__) _lowerCAmelCase : Any = Path(__file__).parent / """model_card_template.md""" _lowerCAmelCase : Dict = uuida().hex _lowerCAmelCase : Optional[int] = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : Optional[int] = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : int = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/""" def SCREAMING_SNAKE_CASE__ ( snake_case : Union[Dict, str, None] = None )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'; torch/{_torch_version}' if is_flax_available(): ua += f'; jax/{_jax_version}' ua += f'; flax/{_flax_version}' if is_onnx_available(): ua += f'; onnxruntime/{_onnxruntime_version}' # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(snake_case , snake_case ): ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() ) elif isinstance(snake_case , snake_case ): ua += "; " + user_agent return ua def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> List[str]: '''simple docstring''' if token is None: UpperCAmelCase__ : Optional[Any] = HfFolder.get_token() if organization is None: UpperCAmelCase__ : Tuple = whoami(snake_case )["name"] return f'{username}/{model_id}' else: return f'{organization}/{model_id}' def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : List[Any] )-> List[Any]: '''simple docstring''' if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." 
) if hasattr(snake_case , "local_rank" ) and args.local_rank not in [-1, 0]: return UpperCAmelCase__ : int = args.hub_token if hasattr(snake_case , "hub_token" ) else None UpperCAmelCase__ : Optional[Any] = get_full_repo_name(snake_case , token=snake_case ) UpperCAmelCase__ : Tuple = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=snake_case , model_name=snake_case , repo_name=snake_case , dataset_name=args.dataset_name if hasattr(snake_case , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(snake_case , "gradient_accumulation_steps" ) else None ) , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta1" ) else None , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(snake_case , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(snake_case , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(snake_case , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(snake_case , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(snake_case , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(snake_case , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(snake_case , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , ) UpperCAmelCase__ : List[str] = os.path.join(args.output_dir , "README.md" ) model_card.save(snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] , snake_case : Optional[str] = None )-> Tuple: '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash UpperCAmelCase__ : Dict = str(Path(snake_case ).as_posix() ) UpperCAmelCase__ : Optional[int] = re.search(r"snapshots/([^/]+)/" , snake_case ) if search is None: return None UpperCAmelCase__ : Dict = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(snake_case ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. 
_lowerCAmelCase : Dict = os.path.expanduser( os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface""")) ) _lowerCAmelCase : List[Any] = os.path.join(hf_cache_home, """diffusers""") def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> None: '''simple docstring''' if new_cache_dir is None: UpperCAmelCase__ : Union[str, Any] = DIFFUSERS_CACHE if old_cache_dir is None: UpperCAmelCase__ : str = old_diffusers_cache UpperCAmelCase__ : List[str] = Path(snake_case ).expanduser() UpperCAmelCase__ : Any = Path(snake_case ).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): UpperCAmelCase__ : Dict = new_cache_dir / old_blob_path.relative_to(snake_case ) new_blob_path.parent.mkdir(parents=snake_case , exist_ok=snake_case ) os.replace(snake_case , snake_case ) try: os.symlink(snake_case , snake_case ) except OSError: logger.warning( "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). _lowerCAmelCase : Tuple = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""") if not os.path.isfile(cache_version_file): _lowerCAmelCase : Any = 0 else: with open(cache_version_file) as f: try: _lowerCAmelCase : List[str] = int(f.read()) except ValueError: _lowerCAmelCase : Optional[int] = 0 if cache_version < 1: _lowerCAmelCase : List[str] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( """The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """ """existing cached models. This is a one-time operation, you can interrupt it or run it """ """later by calling `diffusers.utils.hub_utils.move_cache()`.""" ) try: move_cache() except Exception as e: _lowerCAmelCase : Dict = """\n""".join(traceback.format_tb(e.__traceback__)) logger.error( F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """ """file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """ """message and we will do our best to help.""" ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, """w""") as f: f.write("""1""") except Exception: logger.warning( F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """ """the directory exists and can be written to.""" ) def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None )-> str: '''simple docstring''' if variant is not None: UpperCAmelCase__ : int = weights_name.split("." 
) UpperCAmelCase__ : Optional[Any] = splits[:-1] + [variant] + splits[-1:] UpperCAmelCase__ : Optional[int] = ".".join(snake_case ) return weights_name def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , *, snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : str , snake_case : List[str] , snake_case : Dict , snake_case : Any , snake_case : Any , snake_case : Tuple , snake_case : List[str] , snake_case : Any , snake_case : Optional[int]=None , )-> Tuple: '''simple docstring''' UpperCAmelCase__ : List[str] = str(snake_case ) if os.path.isfile(snake_case ): return pretrained_model_name_or_path elif os.path.isdir(snake_case ): if os.path.isfile(os.path.join(snake_case , snake_case ) ): # Load from a PyTorch checkpoint UpperCAmelCase__ : Any = os.path.join(snake_case , snake_case ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(snake_case , snake_case , snake_case ) ): UpperCAmelCase__ : str = os.path.join(snake_case , snake_case , snake_case ) return model_file else: raise EnvironmentError( f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(snake_case ).base_version ) >= version.parse("0.20.0" ) ): try: UpperCAmelCase__ : List[Any] = hf_hub_download( snake_case , filename=_add_variant(snake_case , snake_case ) , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) warnings.warn( f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , snake_case , ) return model_file except: # noqa: E722 warnings.warn( f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(snake_case , snake_case )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(snake_case , snake_case )}\' so that the correct variant file can be added.' , snake_case , ) try: # 2. 
Load model file as usual UpperCAmelCase__ : Dict = hf_hub_download( snake_case , filename=snake_case , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ' "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ' "this model name. Check the model page at " f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' ) except EntryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' ) except HTTPError as err: raise EnvironmentError( f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' ) except ValueError: raise EnvironmentError( f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it' f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a' f' directory containing a file named {weights_name} or' " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ' "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ' f'containing a file named {weights_name}' )
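# Toy check (not from the module above) of the variant-renaming logic in the
# _add_variant-style helper: the variant tag is spliced in just before the
# file extension.
from typing import Optional

def add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name

assert add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"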
298
1
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) _lowerCAmelCase : Any = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : str = [ """VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""", """ViTMAEForPreTraining""", """ViTMAELayer""", """ViTMAEModel""", """ViTMAEPreTrainedModel""", ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : str = [ """TFViTMAEForPreTraining""", """TFViTMAEModel""", """TFViTMAEPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_vit_mae import ( VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST, ViTMAEForPreTraining, ViTMAELayer, ViTMAEModel, ViTMAEPreTrainedModel, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel else: import sys _lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" ) UpperCAmelCase__ : int = AutoTokenizer.from_pretrained("google/mt5-small" ) UpperCAmelCase__ : Dict = tokenizer("Hello there" , return_tensors="tf" ).input_ids UpperCAmelCase__ : Union[str, Any] = tokenizer("Hi I am" , return_tensors="tf" ).input_ids UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ).loss UpperCAmelCase__ : Optional[Any] = -tf.math.reduce_mean(snake_case__ ).numpy() UpperCAmelCase__ : List[Any] = -21.22_8168 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
"""simple docstring""" import itertools import os import random import tempfile import unittest import numpy as np from transformers import TvltFeatureExtractor, is_datasets_available from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio from transformers.utils.import_utils import is_torch_available from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_torch_available(): import torch if is_datasets_available(): from datasets import load_dataset _lowerCAmelCase : Any = random.Random() def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : List[str]=1.0 , snake_case : str=None , snake_case : Tuple=None )-> int: '''simple docstring''' if rng is None: UpperCAmelCase__ : Union[str, Any] = global_rng UpperCAmelCase__ : List[str] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values class lowerCAmelCase__ ( unittest.TestCase ): def __init__( self : Optional[int] , snake_case__ : Tuple , snake_case__ : Optional[int]=7 , snake_case__ : Optional[int]=4_0_0 , snake_case__ : List[str]=2_0_0_0 , snake_case__ : Any=2_0_4_8 , snake_case__ : Union[str, Any]=1_2_8 , snake_case__ : str=1 , snake_case__ : Optional[int]=5_1_2 , snake_case__ : int=3_0 , snake_case__ : Tuple=4_4_1_0_0 , ): '''simple docstring''' UpperCAmelCase__ : Dict = parent UpperCAmelCase__ : List[Any] = batch_size UpperCAmelCase__ : List[str] = min_seq_length UpperCAmelCase__ : List[Any] = max_seq_length UpperCAmelCase__ : Optional[int] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) UpperCAmelCase__ : int = spectrogram_length UpperCAmelCase__ : int = feature_size UpperCAmelCase__ : List[Any] = num_audio_channels UpperCAmelCase__ : int = hop_length UpperCAmelCase__ : Any = chunk_length UpperCAmelCase__ : Any = sampling_rate def __a ( self : Optional[int] ): '''simple docstring''' return { "spectrogram_length": self.spectrogram_length, "feature_size": self.feature_size, "num_audio_channels": self.num_audio_channels, "hop_length": self.hop_length, "chunk_length": self.chunk_length, "sampling_rate": self.sampling_rate, } def __a ( self : List[str] , snake_case__ : str=False , snake_case__ : Tuple=False ): '''simple docstring''' def _flatten(snake_case__ : str ): return list(itertools.chain(*snake_case__ ) ) if equal_length: UpperCAmelCase__ : Optional[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )] else: # make sure that inputs increase in size UpperCAmelCase__ : List[str] = [ floats_list((x, self.feature_size) ) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff ) ] if numpify: UpperCAmelCase__ : Any = [np.asarray(snake_case__ ) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =TvltFeatureExtractor def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : str = TvltFeatureExtractionTester(self ) def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : int = self.feature_extraction_class(**self.feat_extract_dict ) self.assertTrue(hasattr(snake_case__ , "spectrogram_length" ) ) self.assertTrue(hasattr(snake_case__ , "feature_size" ) ) self.assertTrue(hasattr(snake_case__ , "num_audio_channels" ) ) self.assertTrue(hasattr(snake_case__ , "hop_length" ) ) self.assertTrue(hasattr(snake_case__ , "chunk_length" ) ) 
self.assertTrue(hasattr(snake_case__ , "sampling_rate" ) ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Any = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase__ : List[str] = feat_extract_first.save_pretrained(snake_case__ )[0] check_json_file_has_correct_format(snake_case__ ) UpperCAmelCase__ : Optional[Any] = self.feature_extraction_class.from_pretrained(snake_case__ ) UpperCAmelCase__ : Dict = feat_extract_first.to_dict() UpperCAmelCase__ : Tuple = feat_extract_second.to_dict() UpperCAmelCase__ : Optional[int] = dict_first.pop("mel_filters" ) UpperCAmelCase__ : Any = dict_second.pop("mel_filters" ) self.assertTrue(np.allclose(snake_case__ , snake_case__ ) ) self.assertEqual(snake_case__ , snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict ) with tempfile.TemporaryDirectory() as tmpdirname: UpperCAmelCase__ : Tuple = os.path.join(snake_case__ , "feat_extract.json" ) feat_extract_first.to_json_file(snake_case__ ) UpperCAmelCase__ : List[str] = self.feature_extraction_class.from_json_file(snake_case__ ) UpperCAmelCase__ : Dict = feat_extract_first.to_dict() UpperCAmelCase__ : Optional[Any] = feat_extract_second.to_dict() UpperCAmelCase__ : Optional[int] = dict_first.pop("mel_filters" ) UpperCAmelCase__ : Optional[Any] = dict_second.pop("mel_filters" ) self.assertTrue(np.allclose(snake_case__ , snake_case__ ) ) self.assertEqual(snake_case__ , snake_case__ ) def __a ( self : Dict ): '''simple docstring''' # Initialize feature_extractor UpperCAmelCase__ : List[str] = self.feature_extraction_class(**self.feat_extract_dict ) # create three inputs of length 800, 1000, and 1200 UpperCAmelCase__ : List[Any] = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )] UpperCAmelCase__ : int = [np.asarray(snake_case__ ) for speech_input in speech_inputs] # Test not batched input UpperCAmelCase__ : Any = feature_extractor(np_speech_inputs[0] , return_tensors="np" , sampling_rate=4_4_1_0_0 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test batched UpperCAmelCase__ : Optional[int] = feature_extractor(snake_case__ , return_tensors="np" , sampling_rate=4_4_1_0_0 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test audio masking UpperCAmelCase__ : Tuple = feature_extractor( snake_case__ , return_tensors="np" , sampling_rate=4_4_1_0_0 , mask_audio=snake_case__ ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) # Test 2-D numpy arrays are batched. 
UpperCAmelCase__ : Optional[Any] = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)] UpperCAmelCase__ : Union[str, Any] = np.asarray(snake_case__ ) UpperCAmelCase__ : Any = feature_extractor(snake_case__ , return_tensors="np" , sampling_rate=4_4_1_0_0 ).audio_values self.assertTrue(encoded_audios.ndim == 4 ) self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size ) self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length ) self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels ) def __a ( self : Any , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" ) # automatic decoding with librispeech UpperCAmelCase__ : Optional[Any] = ds.sort("id" ).select(range(snake_case__ ) )[:num_samples]["audio"] return [x["array"] for x in speech_samples] def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[str] = self._load_datasamples(1 ) UpperCAmelCase__ : Optional[int] = TvltFeatureExtractor() UpperCAmelCase__ : Tuple = feature_extractor(snake_case__ , return_tensors="pt" ).audio_values self.assertEquals(audio_values.shape , (1, 1, 1_9_2, 1_2_8) ) UpperCAmelCase__ : Dict = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] ) self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , snake_case__ , atol=1e-4 ) )
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : def __init__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict=1_3 , snake_case__ : List[str]=7 , snake_case__ : Union[str, Any]=True , snake_case__ : Tuple=True , snake_case__ : Optional[int]=True , snake_case__ : Any=True , snake_case__ : Any=9_9 , snake_case__ : List[Any]=1_6 , snake_case__ : Any=3_6 , snake_case__ : Union[str, Any]=6 , snake_case__ : Tuple=6 , snake_case__ : List[str]=6 , snake_case__ : List[str]=3_7 , snake_case__ : Dict="gelu" , snake_case__ : int=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : List[str]=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : List[str]=3 , snake_case__ : Any=4 , snake_case__ : int=None , ): '''simple docstring''' UpperCAmelCase__ : Tuple = parent UpperCAmelCase__ : int = batch_size UpperCAmelCase__ : int = seq_length UpperCAmelCase__ : List[str] = is_training UpperCAmelCase__ : Union[str, Any] = use_input_mask UpperCAmelCase__ : Optional[Any] = use_token_type_ids UpperCAmelCase__ : Any = use_labels UpperCAmelCase__ : List[Any] = vocab_size UpperCAmelCase__ : Any = embedding_size UpperCAmelCase__ : List[str] = hidden_size UpperCAmelCase__ : List[Any] = num_hidden_layers UpperCAmelCase__ : int = num_hidden_groups UpperCAmelCase__ : Union[str, Any] = num_attention_heads UpperCAmelCase__ : List[str] = intermediate_size UpperCAmelCase__ : Optional[Any] = hidden_act UpperCAmelCase__ : List[Any] = hidden_dropout_prob UpperCAmelCase__ : Tuple = attention_probs_dropout_prob UpperCAmelCase__ : str = max_position_embeddings UpperCAmelCase__ : Any = type_vocab_size UpperCAmelCase__ : Union[str, Any] = type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = initializer_range UpperCAmelCase__ : Tuple = num_labels UpperCAmelCase__ : List[str] = num_choices UpperCAmelCase__ : Union[str, Any] = scope def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ : Optional[int] = None if self.use_input_mask: UpperCAmelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : Optional[int] = None if self.use_token_type_ids: UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : Any = None if self.use_labels: UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ : int = 
self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self : Any ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def __a ( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = AlbertModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ ) UpperCAmelCase__ : Optional[Any] = model(snake_case__ , token_type_ids=snake_case__ ) UpperCAmelCase__ : Optional[int] = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertForPreTraining(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , sentence_order_label=snake_case__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def __a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AlbertForMaskedLM(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertForQuestionAnswering(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[str] = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self 
: Dict , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.num_labels UpperCAmelCase__ : int = AlbertForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self : str , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : str = self.num_labels UpperCAmelCase__ : Any = AlbertForTokenClassification(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[str] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self : Any , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.num_choices UpperCAmelCase__ : Optional[Any] = AlbertForMultipleChoice(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Tuple = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[Any] = config_and_inputs UpperCAmelCase__ : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( { '''feature-extraction''': AlbertModel, '''fill-mask''': AlbertForMaskedLM, '''question-answering''': AlbertForQuestionAnswering, '''text-classification''': AlbertForSequenceClassification, '''token-classification''': AlbertForTokenClassification, '''zero-shot''': AlbertForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =True def __a ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[int]=False ): '''simple docstring''' UpperCAmelCase__ : List[str] = super()._prepare_for_class(snake_case__ , 
snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class in get_values(snake_case__ ): UpperCAmelCase__ : List[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case__ ) UpperCAmelCase__ : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = AlbertModelTester(self ) UpperCAmelCase__ : Any = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 ) def __a ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case__ ) def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase__ : Dict = type self.model_tester.create_and_check_model(*snake_case__ ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained("albert-base-v2" ) UpperCAmelCase__ : Dict = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) UpperCAmelCase__ : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ )[0] UpperCAmelCase__ : Dict = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , snake_case__ ) UpperCAmelCase__ : Union[str, Any] = torch.tensor( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1e-4 ) )
"""simple docstring""" from typing import Dict import numpy as np from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException if is_tf_available(): import tensorflow as tf from ..tf_utils import stable_softmax if is_torch_available(): import torch _lowerCAmelCase : Any = logging.get_logger(__name__) @add_end_docstrings( __magic_name__ , r''' top_k (`int`, defaults to 5): The number of predictions to return. targets (`str` or `List[str]`, *optional*): When passed, the model will limit the scores to the passed targets instead of looking up in the whole vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting token will be used (with a warning, and that might be slower). ''' , ) class lowerCAmelCase__ ( __magic_name__ ): def __a ( self : List[Any] , snake_case__ : GenericTensor ): '''simple docstring''' if self.framework == "tf": UpperCAmelCase__ : str = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy() elif self.framework == "pt": UpperCAmelCase__ : List[Any] = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case__ ) else: raise ValueError("Unsupported framework" ) return masked_index def __a ( self : Dict , snake_case__ : GenericTensor ): '''simple docstring''' UpperCAmelCase__ : Dict = self.get_masked_index(snake_case__ ) UpperCAmelCase__ : Tuple = np.prod(masked_index.shape ) if numel < 1: raise PipelineException( "fill-mask" , self.model.base_model_prefix , f'No mask_token ({self.tokenizer.mask_token}) found on the input' , ) def __a ( self : Dict , snake_case__ : GenericTensor ): '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): for model_input in model_inputs: self._ensure_exactly_one_mask_token(model_input["input_ids"][0] ) else: for input_ids in model_inputs["input_ids"]: self._ensure_exactly_one_mask_token(snake_case__ ) def __a ( self : Optional[Any] , snake_case__ : str , snake_case__ : Dict=None , **snake_case__ : Union[str, Any] ): '''simple docstring''' if return_tensors is None: UpperCAmelCase__ : List[str] = self.framework UpperCAmelCase__ : Optional[int] = self.tokenizer(snake_case__ , return_tensors=snake_case__ ) self.ensure_exactly_one_mask_token(snake_case__ ) return model_inputs def __a ( self : str , snake_case__ : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model(**snake_case__ ) UpperCAmelCase__ : Dict = model_inputs["input_ids"] return model_outputs def __a ( self : List[Any] , snake_case__ : str , snake_case__ : Optional[Any]=5 , snake_case__ : List[str]=None ): '''simple docstring''' # Cap top_k if there are targets if target_ids is not None and target_ids.shape[0] < top_k: UpperCAmelCase__ : Dict = target_ids.shape[0] UpperCAmelCase__ : Dict = model_outputs["input_ids"][0] UpperCAmelCase__ : List[str] = model_outputs["logits"] if self.framework == "tf": UpperCAmelCase__ : Tuple = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0] UpperCAmelCase__ : Any = outputs.numpy() UpperCAmelCase__ : Union[str, Any] = outputs[0, masked_index, :] UpperCAmelCase__ : Optional[int] = stable_softmax(snake_case__ , axis=-1 ) if target_ids is not None: UpperCAmelCase__ : Union[str, Any] = tf.gather_nd(tf.squeeze(snake_case__ , 0 ) , target_ids.reshape(-1 , 1 ) ) UpperCAmelCase__ : Optional[Any] = tf.expand_dims(snake_case__ , 0 ) UpperCAmelCase__ : int = tf.math.top_k(snake_case__ , k=snake_case__ ) UpperCAmelCase__ , 
UpperCAmelCase__ : str = topk.values.numpy(), topk.indices.numpy() else: UpperCAmelCase__ : Any = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=snake_case__ ).squeeze(-1 ) # Fill mask pipeline supports only one ${mask_token} per sample UpperCAmelCase__ : Dict = outputs[0, masked_index, :] UpperCAmelCase__ : int = logits.softmax(dim=-1 ) if target_ids is not None: UpperCAmelCase__ : str = probs[..., target_ids] UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = probs.topk(snake_case__ ) UpperCAmelCase__ : Any = [] UpperCAmelCase__ : Any = values.shape[0] == 1 for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ): UpperCAmelCase__ : Optional[Any] = [] for v, p in zip(_values , _predictions ): # Copy is important since we're going to modify this array in place UpperCAmelCase__ : Optional[int] = input_ids.numpy().copy() if target_ids is not None: UpperCAmelCase__ : Dict = target_ids[p].tolist() UpperCAmelCase__ : Optional[Any] = p # Filter padding out: UpperCAmelCase__ : int = tokens[np.where(tokens != self.tokenizer.pad_token_id )] # Originally we skip special tokens to give readable output. # For multi masks though, the other [MASK] would be removed otherwise # making the output look odd, so we add them back UpperCAmelCase__ : Union[str, Any] = self.tokenizer.decode(snake_case__ , skip_special_tokens=snake_case__ ) UpperCAmelCase__ : int = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence} row.append(snake_case__ ) result.append(snake_case__ ) if single_mask: return result[0] return result def __a ( self : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Any=None ): '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): UpperCAmelCase__ : Tuple = [targets] try: UpperCAmelCase__ : List[Any] = self.tokenizer.get_vocab() except Exception: UpperCAmelCase__ : Dict = {} UpperCAmelCase__ : Optional[int] = [] for target in targets: UpperCAmelCase__ : Tuple = vocab.get(snake_case__ , snake_case__ ) if id_ is None: UpperCAmelCase__ : str = self.tokenizer( snake_case__ , add_special_tokens=snake_case__ , return_attention_mask=snake_case__ , return_token_type_ids=snake_case__ , max_length=1 , truncation=snake_case__ , )["input_ids"] if len(snake_case__ ) == 0: logger.warning( f'The specified target token `{target}` does not exist in the model vocabulary. ' "We cannot replace it with anything meaningful, ignoring it" ) continue UpperCAmelCase__ : List[Any] = input_ids[0] # XXX: If users encounter this pass # it becomes pretty slow, so let's make sure # The warning enables them to fix the input to # get faster performance. logger.warning( f'The specified target token `{target}` does not exist in the model vocabulary. ' f'Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.' ) target_ids.append(id_ ) UpperCAmelCase__ : str = list(set(snake_case__ ) ) if len(snake_case__ ) == 0: raise ValueError("At least one target must be provided when passed." 
) UpperCAmelCase__ : Tuple = np.array(snake_case__ ) return target_ids def __a ( self : Tuple , snake_case__ : List[str]=None , snake_case__ : Tuple=None ): '''simple docstring''' UpperCAmelCase__ : List[Any] = {} if targets is not None: UpperCAmelCase__ : Tuple = self.get_target_ids(snake_case__ , snake_case__ ) UpperCAmelCase__ : Dict = target_ids if top_k is not None: UpperCAmelCase__ : Optional[int] = top_k if self.tokenizer.mask_token_id is None: raise PipelineException( "fill-mask" , self.model.base_model_prefix , "The tokenizer does not define a `mask_token`." ) return {}, {}, postprocess_params def __call__( self : Union[str, Any] , snake_case__ : Union[str, Any] , *snake_case__ : int , **snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : str = super().__call__(snake_case__ , **snake_case__ ) if isinstance(snake_case__ , snake_case__ ) and len(snake_case__ ) == 1: return outputs[0] return outputs
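# A minimal usage sketch for the fill-mask pipeline above. The class name is
# obfuscated in this row; the public transformers entry point is
# `pipeline("fill-mask")`. The model checkpoint chosen here is an assumption.
from transformers import pipeline

unmasker = pipeline("fill-mask", model="distilroberta-base")
for pred in unmasker("The capital of France is <mask>.", top_k=3):
    # each prediction carries "score", "token", "token_str" and "sequence",
    # the exact keys assembled in the postprocess step above
    print(pred["token_str"], round(pred["score"], 4))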
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Any )-> Any: '''simple docstring''' UpperCAmelCase__ : List[str] = [1] for i in range(2 , snake_case ): factorials.append(factorials[-1] * i ) assert 0 <= k < factorials[-1] * n, "k out of bounds" UpperCAmelCase__ : Union[str, Any] = [] UpperCAmelCase__ : str = list(range(snake_case ) ) # Find permutation while factorials: UpperCAmelCase__ : str = factorials.pop() UpperCAmelCase__ , UpperCAmelCase__ : int = divmod(snake_case , snake_case ) permutation.append(elements[number] ) elements.remove(elements[number] ) permutation.append(elements[0] ) return permutation if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from __future__ import annotations from collections.abc import Callable def SCREAMING_SNAKE_CASE__ ( snake_case : Callable[[int | float], int | float] , snake_case : int | float , snake_case : int | float , snake_case : int = 100 , )-> float: '''simple docstring''' UpperCAmelCase__ : List[str] = x_start UpperCAmelCase__ : List[Any] = fnc(snake_case ) UpperCAmelCase__ : Optional[int] = 0.0 for _ in range(snake_case ): # Approximates small segments of curve as linear and solve # for trapezoidal area UpperCAmelCase__ : int = (x_end - x_start) / steps + xa UpperCAmelCase__ : int = fnc(snake_case ) area += abs(fxa + fxa ) * (xa - xa) / 2 # Increment step UpperCAmelCase__ : Any = xa UpperCAmelCase__ : Any = fxa return area if __name__ == "__main__": def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple )-> Optional[Any]: '''simple docstring''' return x**3 + x**2 print("""f(x) = x^3 + x^2""") print("""The area between the curve, x = -5, x = 5 and the x axis is:""") _lowerCAmelCase : List[str] = 10 while i <= 100_000: print(F"""with {i} steps: {trapezoidal_area(f, -5, 5, i)}""") i *= 10
"""simple docstring""" import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: _lowerCAmelCase : Union[str, Any] = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class lowerCAmelCase__ ( unittest.TestCase ): def __init__( self : Dict , snake_case__ : Optional[int] , snake_case__ : List[str]=7 , snake_case__ : int=3 , snake_case__ : Any=1_8 , snake_case__ : List[Any]=3_0 , snake_case__ : int=4_0_0 , snake_case__ : Dict=None , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=None , ): '''simple docstring''' UpperCAmelCase__ : Dict = size if size is not None else {"height": 2_0, "width": 2_0} UpperCAmelCase__ : List[str] = parent UpperCAmelCase__ : List[str] = batch_size UpperCAmelCase__ : Optional[Any] = num_channels UpperCAmelCase__ : Any = image_size UpperCAmelCase__ : int = min_resolution UpperCAmelCase__ : Tuple = max_resolution UpperCAmelCase__ : Optional[int] = size UpperCAmelCase__ : Optional[int] = do_normalize UpperCAmelCase__ : str = do_convert_rgb UpperCAmelCase__ : Dict = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6] UpperCAmelCase__ : Union[str, Any] = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6} def __a ( self : str ): '''simple docstring''' return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg" UpperCAmelCase__ : List[str] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("RGB" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : int = PixaStructImageProcessingTester(self ) @property def __a ( self : Optional[int] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , "do_normalize" ) ) self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.image_processor_tester.prepare_dummy_image() UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict ) UpperCAmelCase__ : Dict = 2_0_4_8 UpperCAmelCase__ : int = image_processor(snake_case__ , return_tensors="pt" , max_patches=snake_case__ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) ) def __a ( self : List[Any] ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : 
Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : List[Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : str = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : List[Any] ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 UpperCAmelCase__ : Optional[int] = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(snake_case__ ): UpperCAmelCase__ : List[Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches UpperCAmelCase__ : Optional[Any] = "Hello" UpperCAmelCase__ : int = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : Dict = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : Dict ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , np.ndarray ) UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : Dict = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : List[str] = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : Optional[int] ): 
'''simple docstring''' # Initialize image_processor UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , torch.Tensor ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : int = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : str = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = PixaStructImageProcessingTester(self , num_channels=4 ) UpperCAmelCase__ : Optional[int] = 3 @property def __a ( self : int ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , "do_normalize" ) ) self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) ) def __a ( self : int ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : str = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : Optional[int] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : Dict = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
"""simple docstring""" from __future__ import annotations class lowerCAmelCase__ : def __init__( self : str , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = order # a_{0} ... a_{k} UpperCAmelCase__ : Any = [1.0] + [0.0] * order # b_{0} ... b_{k} UpperCAmelCase__ : List[Any] = [1.0] + [0.0] * order # x[n-1] ... x[n-k] UpperCAmelCase__ : int = [0.0] * self.order # y[n-1] ... y[n-k] UpperCAmelCase__ : List[str] = [0.0] * self.order def __a ( self : List[Any] , snake_case__ : list[float] , snake_case__ : list[float] ): '''simple docstring''' if len(snake_case__ ) < self.order: UpperCAmelCase__ : int = [1.0, *a_coeffs] if len(snake_case__ ) != self.order + 1: UpperCAmelCase__ : Optional[Any] = ( f'Expected a_coeffs to have {self.order + 1} elements ' f'for {self.order}-order filter, got {len(snake_case__ )}' ) raise ValueError(snake_case__ ) if len(snake_case__ ) != self.order + 1: UpperCAmelCase__ : Any = ( f'Expected b_coeffs to have {self.order + 1} elements ' f'for {self.order}-order filter, got {len(snake_case__ )}' ) raise ValueError(snake_case__ ) UpperCAmelCase__ : Any = a_coeffs UpperCAmelCase__ : int = b_coeffs def __a ( self : Dict , snake_case__ : float ): '''simple docstring''' UpperCAmelCase__ : str = 0.0 # Start at index 1 and do index 0 at the end. for i in range(1 , self.order + 1 ): result += ( self.b_coeffs[i] * self.input_history[i - 1] - self.a_coeffs[i] * self.output_history[i - 1] ) UpperCAmelCase__ : Dict = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0] UpperCAmelCase__ : Any = self.input_history[:-1] UpperCAmelCase__ : List[str] = self.output_history[:-1] UpperCAmelCase__ : Optional[int] = sample UpperCAmelCase__ : str = result return result
"""simple docstring""" import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> Any: '''simple docstring''' assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE__ ( )-> List[Any]: '''simple docstring''' assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE__ ( )-> Optional[int]: '''simple docstring''' UpperCAmelCase__ : int = "mock-s3-bucket" UpperCAmelCase__ : Any = f's3://{mock_bucket}' UpperCAmelCase__ : Tuple = extract_path_from_uri(snake_case ) assert dataset_path.startswith("s3://" ) is False UpperCAmelCase__ : str = "./local/path" UpperCAmelCase__ : Union[str, Any] = extract_path_from_uri(snake_case ) assert dataset_path == new_dataset_path def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case ) assert is_remote is True UpperCAmelCase__ : str = fsspec.filesystem("file" ) UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case ) assert is_remote is False @pytest.mark.parametrize("compression_fs_class" , snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Any , snake_case : List[str] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : int )-> int: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file} UpperCAmelCase__ : Dict = input_paths[compression_fs_class.protocol] if input_path is None: UpperCAmelCase__ : Optional[Any] = f'for \'{compression_fs_class.protocol}\' compression protocol, ' if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case ) UpperCAmelCase__ : Optional[Any] = fsspec.filesystem(compression_fs_class.protocol , fo=snake_case ) assert isinstance(snake_case , snake_case ) UpperCAmelCase__ : Union[str, Any] = os.path.basename(snake_case ) UpperCAmelCase__ : Optional[int] = expected_filename[: expected_filename.rindex("." 
)] assert fs.glob("*" ) == [expected_filename] with fs.open(snake_case , "r" , encoding="utf-8" ) as f, open(snake_case , encoding="utf-8" ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("protocol" , ["zip", "gzip"] ) def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : Dict , snake_case : Tuple )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[str] = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path} UpperCAmelCase__ : int = compressed_file_paths[protocol] UpperCAmelCase__ : Any = "dataset.jsonl" UpperCAmelCase__ : Any = f'{protocol}://{member_file_path}::{compressed_file_path}' UpperCAmelCase__ , *UpperCAmelCase__ : Optional[int] = fsspec.get_fs_token_paths(snake_case ) assert fs.isfile(snake_case ) assert not fs.isfile("non_existing_" + member_file_path ) @pytest.mark.integration def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Dict , snake_case : Dict , snake_case : Dict )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = hf_api.dataset_info(snake_case , token=snake_case ) UpperCAmelCase__ : str = HfFileSystem(repo_info=snake_case , token=snake_case ) assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"] assert hffs.isdir("data" ) assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" ) with open(snake_case ) as f: assert hffs.open("data/text_data.txt" , "r" ).read() == f.read() def SCREAMING_SNAKE_CASE__ ( )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ : Tuple = "bz2" # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(snake_case , snake_case , clobber=snake_case ) with pytest.warns(snake_case ) as warning_info: importlib.reload(datasets.filesystems ) assert len(snake_case ) == 1 assert ( str(warning_info[0].message ) == f'A filesystem protocol was already set for {protocol} and will be overwritten.' )
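# A small sketch of the fsspec URL-chaining idiom the tests above exercise:
# "<protocol>://<member>::<archive>" addresses a file inside an archive. The
# scratch archive created here is an assumption so the example is self-contained.
import zipfile

import fsspec

with zipfile.ZipFile("archive.zip", "w") as zf:
    zf.writestr("dataset.jsonl", '{"text": "hello"}\n')

fs, _token, _paths = fsspec.get_fs_token_paths("zip://dataset.jsonl::archive.zip")
assert fs.isfile("dataset.jsonl")
with fs.open("dataset.jsonl", "r") as f:
    print(f.readline())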
"""simple docstring""" import os from collections import deque import torch from torch.utils.data import Dataset class lowerCAmelCase__ ( __magic_name__ ): def __init__( self : int , snake_case__ : Optional[Any]="" , snake_case__ : str="train" ): '''simple docstring''' assert os.path.isdir(snake_case__ ) UpperCAmelCase__ : int = [] UpperCAmelCase__ : int = os.listdir(snake_case__ ) for story_filename in story_filenames_list: if "summary" in story_filename: continue UpperCAmelCase__ : str = os.path.join(snake_case__ , snake_case__ ) if not os.path.isfile(snake_case__ ): continue self.documents.append(snake_case__ ) def __len__( self : int ): '''simple docstring''' return len(self.documents ) def __getitem__( self : Dict , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = self.documents[idx] UpperCAmelCase__ : List[Any] = document_path.split("/" )[-1] with open(snake_case__ , encoding="utf-8" ) as source: UpperCAmelCase__ : List[str] = source.read() UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = process_story(snake_case__ ) return document_name, story_lines, summary_lines def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple )-> int: '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = list(filter(lambda snake_case : len(snake_case ) != 0 , [line.strip() for line in raw_story.split("\n" )] ) ) # for some unknown reason some lines miss a period, add it UpperCAmelCase__ : Tuple = [_add_missing_period(snake_case ) for line in nonempty_lines] # gather article lines UpperCAmelCase__ : str = [] UpperCAmelCase__ : Any = deque(snake_case ) while True: try: UpperCAmelCase__ : Optional[int] = lines.popleft() if element.startswith("@highlight" ): break story_lines.append(snake_case ) except IndexError: # if "@highlight" is absent from the file we pop # all elements until there is None, raising an exception. return story_lines, [] # gather summary lines UpperCAmelCase__ : Optional[Any] = list(filter(lambda snake_case : not t.startswith("@highlight" ) , snake_case ) ) return story_lines, summary_lines def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] )-> Dict: '''simple docstring''' UpperCAmelCase__ : Optional[int] = [".", "!", "?", "...", "'", "`", "\"", "\u2019", "\u2019", ")"] if line.startswith("@highlight" ): return line if line[-1] in END_TOKENS: return line return line + "." 
def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : List[Any] , snake_case : str )-> int: '''simple docstring''' if len(snake_case ) > block_size: return sequence[:block_size] else: sequence.extend([pad_token_id] * (block_size - len(snake_case )) ) return sequence def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : Union[str, Any] )-> List[Any]: '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = torch.ones_like(snake_case ) UpperCAmelCase__ : Optional[Any] = sequence == pad_token_id UpperCAmelCase__ : Optional[Any] = 0 return mask def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : Tuple , snake_case : str )-> Tuple: '''simple docstring''' UpperCAmelCase__ : Dict = [tokenizer.encode(snake_case ) for line in story_lines] UpperCAmelCase__ : Optional[Any] = [token for sentence in story_lines_token_ids for token in sentence] UpperCAmelCase__ : Any = [tokenizer.encode(snake_case ) for line in summary_lines] UpperCAmelCase__ : Dict = [token for sentence in summary_lines_token_ids for token in sentence] return story_token_ids, summary_token_ids def SCREAMING_SNAKE_CASE__ ( snake_case : Dict , snake_case : Optional[Any] )-> List[Any]: '''simple docstring''' UpperCAmelCase__ : str = [] for sequence in batch: UpperCAmelCase__ : Dict = -1 UpperCAmelCase__ : Optional[Any] = [] for s in sequence: if s == separator_token_id: sentence_num += 1 embeddings.append(sentence_num % 2 ) batch_embeddings.append(snake_case ) return torch.tensor(snake_case )
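# A de-obfuscated sketch of the fit-to-block-size / padding-mask pair defined
# above (the plain names are assumptions; the row's identifiers are obfuscated).
import torch


def fit_to_block_size(sequence: list, block_size: int, pad_token_id: int) -> list:
    # truncate long sequences, right-pad short ones
    if len(sequence) > block_size:
        return sequence[:block_size]
    return sequence + [pad_token_id] * (block_size - len(sequence))


def build_mask(sequence: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    mask = torch.ones_like(sequence)
    mask[sequence == pad_token_id] = 0  # zero out padding positions
    return mask


tokens = fit_to_block_size([5, 6, 7], block_size=6, pad_token_id=0)
print(tokens)                               # [5, 6, 7, 0, 0, 0]
print(build_mask(torch.tensor(tokens), 0))  # tensor([1, 1, 1, 0, 0, 0])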
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : List[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[Any] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''megatron-bert''' def __init__( self : Optional[Any] , snake_case__ : Dict=2_9_0_5_6 , snake_case__ : Optional[int]=1_0_2_4 , snake_case__ : int=2_4 , snake_case__ : str=1_6 , snake_case__ : Optional[Any]=4_0_9_6 , snake_case__ : List[str]="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : Tuple=5_1_2 , snake_case__ : str=2 , snake_case__ : List[Any]=0.02 , snake_case__ : Any=1e-12 , snake_case__ : Any=0 , snake_case__ : str="absolute" , snake_case__ : Optional[Any]=True , **snake_case__ : int , ): '''simple docstring''' super().__init__(pad_token_id=snake_case__ , **snake_case__ ) UpperCAmelCase__ : str = vocab_size UpperCAmelCase__ : str = hidden_size UpperCAmelCase__ : List[str] = num_hidden_layers UpperCAmelCase__ : Optional[int] = num_attention_heads UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Tuple = hidden_dropout_prob UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob UpperCAmelCase__ : Any = max_position_embeddings UpperCAmelCase__ : Dict = type_vocab_size UpperCAmelCase__ : Optional[int] = initializer_range UpperCAmelCase__ : int = layer_norm_eps UpperCAmelCase__ : Optional[Any] = position_embedding_type UpperCAmelCase__ : Any = use_cache
"""simple docstring""" import warnings from ...utils import logging from .image_processing_chinese_clip import ChineseCLIPImageProcessor _lowerCAmelCase : Tuple = logging.get_logger(__name__) class lowerCAmelCase__ ( __magic_name__ ): def __init__( self : Union[str, Any] , *snake_case__ : Tuple , **snake_case__ : Optional[int] ): '''simple docstring''' warnings.warn( "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers." " Please use ChineseCLIPImageProcessor instead." , snake_case__ , ) super().__init__(*snake_case__ , **snake_case__ )
"""simple docstring""" import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class lowerCAmelCase__ ( datasets.BeamBasedBuilder ): def __a ( self : Dict ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=snake_case__ , ) def __a ( self : int , snake_case__ : str , snake_case__ : List[str] ): '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )] def __a ( self : Any , snake_case__ : str , snake_case__ : str ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(snake_case__ ) class lowerCAmelCase__ ( datasets.BeamBasedBuilder ): def __a ( self : Any ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=snake_case__ , ) def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : int ): '''simple docstring''' return [ datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} ) ] def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Any ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Dict: '''simple docstring''' return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )] def SCREAMING_SNAKE_CASE__ ( )-> List[Any]: '''simple docstring''' return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )] class lowerCAmelCase__ ( __magic_name__ ): @require_beam def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : List[Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) ) UpperCAmelCase__ : Tuple = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] ) self.assertDictEqual( dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset @require_beam def __a ( self : Dict ): '''simple docstring''' import apache_beam as beam UpperCAmelCase__ : Dict = beam.io.parquetio.WriteToParquet UpperCAmelCase__ : List[str] = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Union[str, Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock: UpperCAmelCase__ : List[Any] = partial(snake_case__ , num_shards=2 ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( snake_case__ , builder.name , 
"default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) ) self.assertTrue( os.path.exists( os.path.join( snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) ) UpperCAmelCase__ : Dict = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset @require_beam def __a ( self : str ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Optional[Any] = DummyBeamDataset(cache_dir=snake_case__ ) self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare ) @require_beam def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = len(get_test_nested_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : List[Any] = NestedBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) ) self.assertDictEqual( builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) ) UpperCAmelCase__ : Tuple = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] ) self.assertDictEqual( dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset
298
1
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) _lowerCAmelCase : List[str] = logging.getLogger(__name__) if __name__ == "__main__": _lowerCAmelCase : int = argparse.ArgumentParser( description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)""" ) parser.add_argument( """--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset.""" ) parser.add_argument( """--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file.""" ) parser.add_argument("""--vocab_size""", default=30_522, type=int) _lowerCAmelCase : str = parser.parse_args() logger.info(F"""Loading data from {args.data_file}""") with open(args.data_file, """rb""") as fp: _lowerCAmelCase : Dict = pickle.load(fp) logger.info("""Counting occurrences for MLM.""") _lowerCAmelCase : Dict = Counter() for tk_ids in data: counter.update(tk_ids) _lowerCAmelCase : Tuple = [0] * args.vocab_size for k, v in counter.items(): _lowerCAmelCase : List[Any] = v logger.info(F"""Dump to {args.token_counts_dump}""") with open(args.token_counts_dump, """wb""") as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
298
"""simple docstring""" import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =XLMTokenizer SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase__ : Optional[int] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] UpperCAmelCase__ : Any = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase__ : Tuple = ["l o 123", "lo w 1456", "e r</w> 1789", ""] UpperCAmelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" ) as fp: fp.write(json.dumps(snake_case__ ) ) with open(self.merges_file , "w" ) as fp: fp.write("\n".join(snake_case__ ) ) def __a ( self : Union[str, Any] , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = "lower newer" UpperCAmelCase__ : Optional[Any] = "lower newer" return input_text, output_text def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = XLMTokenizer(self.vocab_file , self.merges_file ) UpperCAmelCase__ : List[Any] = "lower" UpperCAmelCase__ : Any = ["low", "er</w>"] UpperCAmelCase__ : Any = tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) UpperCAmelCase__ : Optional[Any] = tokens + ["<unk>"] UpperCAmelCase__ : List[Any] = [1_4, 1_5, 2_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" ) UpperCAmelCase__ : str = tokenizer.encode("sequence builders" , add_special_tokens=snake_case__ ) UpperCAmelCase__ : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case__ ) UpperCAmelCase__ : Any = tokenizer.build_inputs_with_special_tokens(snake_case__ ) UpperCAmelCase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
298
1
"""simple docstring""" import inspect import unittest from transformers import RegNetConfig from transformers.file_utils import cached_property, is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import RegNetForImageClassification, RegNetModel from transformers.models.regnet.modeling_regnet import REGNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class lowerCAmelCase__ : def __init__( self : Optional[int] , snake_case__ : int , snake_case__ : str=3 , snake_case__ : List[str]=3_2 , snake_case__ : int=3 , snake_case__ : Dict=1_0 , snake_case__ : str=[1_0, 2_0, 3_0, 4_0] , snake_case__ : Optional[Any]=[1, 1, 2, 1] , snake_case__ : List[str]=True , snake_case__ : Dict=True , snake_case__ : Optional[int]="relu" , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=None , ): '''simple docstring''' UpperCAmelCase__ : List[str] = parent UpperCAmelCase__ : str = batch_size UpperCAmelCase__ : Union[str, Any] = image_size UpperCAmelCase__ : Tuple = num_channels UpperCAmelCase__ : List[Any] = embeddings_size UpperCAmelCase__ : Union[str, Any] = hidden_sizes UpperCAmelCase__ : Any = depths UpperCAmelCase__ : Tuple = is_training UpperCAmelCase__ : Optional[Any] = use_labels UpperCAmelCase__ : List[Any] = hidden_act UpperCAmelCase__ : List[str] = num_labels UpperCAmelCase__ : Any = scope UpperCAmelCase__ : Any = len(snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : Dict = None if self.use_labels: UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_labels ) UpperCAmelCase__ : Optional[int] = self.get_config() return config, pixel_values, labels def __a ( self : Union[str, Any] ): '''simple docstring''' return RegNetConfig( num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , ) def __a ( self : Any , snake_case__ : Tuple , snake_case__ : int , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : str = RegNetModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Optional[Any] = model(snake_case__ ) # expected last hidden states: B, C, H // 32, W // 32 self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 3_2, self.image_size // 3_2) , ) def __a ( self : Union[str, Any] , snake_case__ : str , snake_case__ : Tuple , snake_case__ : Any ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.num_labels UpperCAmelCase__ : List[Any] = RegNetForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[str] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = config_and_inputs UpperCAmelCase__ 
: Union[str, Any] = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =(RegNetModel, RegNetForImageClassification) if is_torch_available() else () SCREAMING_SNAKE_CASE_ =( {'''feature-extraction''': RegNetModel, '''image-classification''': RegNetForImageClassification} if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = RegNetModelTester(self ) UpperCAmelCase__ : Dict = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ ) def __a ( self : Any ): '''simple docstring''' self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def __a ( self : Any ): '''simple docstring''' return @unittest.skip(reason="RegNet does not use inputs_embeds" ) def __a ( self : Tuple ): '''simple docstring''' pass @unittest.skip(reason="RegNet does not support input and output embeddings" ) def __a ( self : str ): '''simple docstring''' pass def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(snake_case__ ) UpperCAmelCase__ : List[str] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : List[str] = [*signature.parameters.keys()] UpperCAmelCase__ : Union[str, Any] = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Union[str, Any] = model_class(config=snake_case__ ) for name, module in model.named_modules(): if isinstance(snake_case__ , (nn.BatchNormad, nn.GroupNorm) ): self.assertTrue( torch.all(module.weight == 1 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) self.assertTrue( torch.all(module.bias == 0 ) , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) def __a ( self : Optional[int] ): '''simple docstring''' def check_hidden_states_output(snake_case__ : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Union[str, Any] ): UpperCAmelCase__ : Tuple = model_class(snake_case__ ) model.to(snake_case__ ) model.eval() with torch.no_grad(): UpperCAmelCase__ : List[str] = model(**self._prepare_for_class(snake_case__ , snake_case__ ) ) UpperCAmelCase__ : Dict = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states UpperCAmelCase__ : Union[str, Any] = self.model_tester.num_stages self.assertEqual(len(snake_case__ ) , expected_num_stages + 1 ) # RegNet's 
feature maps are of shape (batch_size, num_channels, height, width) self.assertListEqual( list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 2, self.model_tester.image_size // 2] , ) UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Union[str, Any] = ["basic", "bottleneck"] for model_class in self.all_model_classes: for layer_type in layers_type: UpperCAmelCase__ : Dict = layer_type UpperCAmelCase__ : Tuple = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] UpperCAmelCase__ : Dict = True check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) @slow def __a ( self : str ): '''simple docstring''' for model_name in REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Optional[int] = RegNetModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> List[str]: '''simple docstring''' UpperCAmelCase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def __a ( self : List[Any] ): '''simple docstring''' return ( AutoImageProcessor.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ) if is_vision_available() else None ) @slow def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = RegNetForImageClassification.from_pretrained(REGNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Any = prepare_img() UpperCAmelCase__ : Union[str, Any] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Dict = model(**snake_case__ ) # verify the logits UpperCAmelCase__ : Dict = torch.Size((1, 1_0_0_0) ) self.assertEqual(outputs.logits.shape , snake_case__ ) UpperCAmelCase__ : Dict = torch.tensor([-0.4180, -1.5051, -3.4836] ).to(snake_case__ ) self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) )
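The slow integration test above condenses to a short inference recipe; a hedged sketch, assuming the first entry of the archive list is the facebook/regnet-y-040 checkpoint:

import torch
from PIL import Image
from transformers import AutoImageProcessor, RegNetForImageClassification

processor = AutoImageProcessor.from_pretrained("facebook/regnet-y-040")  # assumption: first archive entry
model = RegNetForImageClassification.from_pretrained("facebook/regnet-y-040").eval()

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(model.config.id2label[logits.argmax(-1).item()])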
298
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class lowerCAmelCase__ : def __init__( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : str=sys.maxsize ): '''simple docstring''' UpperCAmelCase__ : Any = "bilinear" UpperCAmelCase__ : Any = max_size UpperCAmelCase__ : Any = short_edge_length def __call__( self : Dict , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = [] for img in imgs: UpperCAmelCase__ , UpperCAmelCase__ : int = img.shape[:2] # later: provide list and randomly choose index for resize UpperCAmelCase__ : Dict = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img UpperCAmelCase__ : Dict = size * 1.0 / min(snake_case__ , snake_case__ ) if h < w: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = size, scale * w else: UpperCAmelCase__ , UpperCAmelCase__ : int = scale * h, size if max(snake_case__ , snake_case__ ) > self.max_size: UpperCAmelCase__ : Union[str, Any] = self.max_size * 1.0 / max(snake_case__ , snake_case__ ) UpperCAmelCase__ : List[str] = newh * scale UpperCAmelCase__ : int = neww * scale UpperCAmelCase__ : List[Any] = int(neww + 0.5 ) UpperCAmelCase__ : Optional[Any] = int(newh + 0.5 ) if img.dtype == np.uinta: UpperCAmelCase__ : Any = Image.fromarray(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) UpperCAmelCase__ : Optional[int] = np.asarray(snake_case__ ) else: UpperCAmelCase__ : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw UpperCAmelCase__ : Tuple = nn.functional.interpolate( snake_case__ , (newh, neww) , mode=self.interp_method , align_corners=snake_case__ ).squeeze(0 ) img_augs.append(snake_case__ ) return img_augs class lowerCAmelCase__ : def __init__( self : Optional[int] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) UpperCAmelCase__ : Any = cfg.INPUT.FORMAT UpperCAmelCase__ : Optional[Any] = cfg.SIZE_DIVISIBILITY UpperCAmelCase__ : str = cfg.PAD_VALUE UpperCAmelCase__ : List[Any] = cfg.INPUT.MAX_SIZE_TEST UpperCAmelCase__ : Dict = cfg.MODEL.DEVICE UpperCAmelCase__ : Optional[int] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase__ : str = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase__ : List[str] = lambda snake_case__ : (x - self.pixel_mean) / self.pixel_std def __a ( self : Optional[int] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = tuple(max(snake_case__ ) for s in zip(*[img.shape for img in images] ) ) UpperCAmelCase__ : Tuple = [im.shape[-2:] for im in images] UpperCAmelCase__ : int = [ nn.functional.pad( snake_case__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(snake_case__ , snake_case__ ) ] return torch.stack(snake_case__ ), torch.tensor(snake_case__ ) def __call__( self : str , snake_case__ : int , snake_case__ : int=False ): '''simple docstring''' with torch.no_grad(): if not isinstance(snake_case__ , snake_case__ ): UpperCAmelCase__ : Dict = [images] if single_image: assert len(snake_case__ ) == 1 for i in range(len(snake_case__ ) ): if 
isinstance(images[i] , torch.Tensor ): images.insert(snake_case__ , images.pop(snake_case__ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( snake_case__ , torch.as_tensor(img_tensorize(images.pop(snake_case__ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge UpperCAmelCase__ : Optional[Any] = torch.tensor([im.shape[:2] for im in images] ) UpperCAmelCase__ : Tuple = self.aug(snake_case__ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic UpperCAmelCase__ : Optional[int] = [self.normalizer(snake_case__ ) for x in images] # now pad them to do the following operations UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.pad(snake_case__ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad UpperCAmelCase__ : Tuple = torch.true_divide(snake_case__ , snake_case__ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : str )-> List[Any]: '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple[int, int] )-> int: '''simple docstring''' assert torch.isfinite(snake_case ).all(), "Box tensor contains infinite or NaN!" UpperCAmelCase__ , UpperCAmelCase__ : Dict = box_size tensor[:, 0].clamp_(min=0 , max=snake_case ) tensor[:, 1].clamp_(min=0 , max=snake_case ) tensor[:, 2].clamp_(min=0 , max=snake_case ) tensor[:, 3].clamp_(min=0 , max=snake_case )
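The resize arithmetic in __call__ above reduces to: scale the shorter edge to `size`, then rescale once more if the longer edge would exceed `max_size`. A standalone sketch of just that computation (shortest_edge_resize_hw is a hypothetical helper name, not from the file):

def shortest_edge_resize_hw(h: int, w: int, size: int, max_size: int) -> tuple[int, int]:
    scale = size * 1.0 / min(h, w)  # bring the shorter edge to `size`
    newh, neww = (size, scale * w) if h < w else (scale * h, size)
    if max(newh, neww) > max_size:  # cap the longer edge, preserving aspect ratio
        cap = max_size * 1.0 / max(newh, neww)
        newh, neww = newh * cap, neww * cap
    return int(newh + 0.5), int(neww + 0.5)  # round to the nearest integer pixel

print(shortest_edge_resize_hw(480, 640, 800, 1333))  # (800, 1067)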
298
1
"""simple docstring""" from manim import * class lowerCAmelCase__ ( __magic_name__ ): def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[Any] = Rectangle(height=0.5 , width=0.5 ) UpperCAmelCase__ : List[str] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) UpperCAmelCase__ : Optional[Any] = [mem.copy() for i in range(6 )] UpperCAmelCase__ : Any = [mem.copy() for i in range(6 )] UpperCAmelCase__ : Optional[int] = VGroup(*snake_case__ ).arrange(snake_case__ , buff=0 ) UpperCAmelCase__ : Any = VGroup(*snake_case__ ).arrange(snake_case__ , buff=0 ) UpperCAmelCase__ : List[str] = VGroup(snake_case__ , snake_case__ ).arrange(snake_case__ , buff=0 ) UpperCAmelCase__ : Dict = Text("CPU" , font_size=2_4 ) UpperCAmelCase__ : int = Group(snake_case__ , snake_case__ ).arrange(snake_case__ , buff=0.5 , aligned_edge=snake_case__ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(snake_case__ ) UpperCAmelCase__ : Optional[Any] = [mem.copy() for i in range(1 )] UpperCAmelCase__ : Dict = VGroup(*snake_case__ ).arrange(snake_case__ , buff=0 ) UpperCAmelCase__ : Dict = Text("GPU" , font_size=2_4 ) UpperCAmelCase__ : Dict = Group(snake_case__ , snake_case__ ).arrange(snake_case__ , buff=0.5 , aligned_edge=snake_case__ ) gpu.align_to(snake_case__ , snake_case__ ) gpu.set_x(gpu.get_x() - 1 ) self.add(snake_case__ ) UpperCAmelCase__ : str = [mem.copy() for i in range(6 )] UpperCAmelCase__ : str = VGroup(*snake_case__ ).arrange(snake_case__ , buff=0 ) UpperCAmelCase__ : Dict = Text("Model" , font_size=2_4 ) UpperCAmelCase__ : List[Any] = Group(snake_case__ , snake_case__ ).arrange(snake_case__ , buff=0.5 , aligned_edge=snake_case__ ) model.move_to([3, -1.0, 0] ) self.play( Create(snake_case__ , run_time=1 ) , Create(snake_case__ , run_time=1 ) , Create(snake_case__ , run_time=1 ) , ) UpperCAmelCase__ : Tuple = MarkupText( f'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=2_4 , ) UpperCAmelCase__ : Optional[Any] = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) UpperCAmelCase__ : Dict = MarkupText( f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=1_8 , ) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(snake_case__ , run_time=2.5 ) , Write(snake_case__ ) , Write(snake_case__ ) ) self.add(snake_case__ ) UpperCAmelCase__ : List[str] = [] UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : List[str] = [] for i, rect in enumerate(snake_case__ ): UpperCAmelCase__ : Tuple = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(snake_case__ , opacity=0.7 ) cpu_target.move_to(snake_case__ ) cpu_target.generate_target() UpperCAmelCase__ : Optional[Any] = 0.46 / 4 UpperCAmelCase__ : Tuple = 0.46 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=snake_case__ ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target , direction=snake_case__ , buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target , direction=snake_case__ , buff=0.0 ) cpu_targs.append(snake_case__ ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(snake_case__ ) ) second_animations.append(MoveToTarget(snake_case__ , run_time=1.5 ) ) self.play(*snake_case__ ) self.play(*snake_case__ ) self.wait()
298
"""simple docstring""" import qiskit def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int )-> qiskit.result.counts.Counts: '''simple docstring''' UpperCAmelCase__ : str = qiskit.Aer.get_backend("aer_simulator" ) UpperCAmelCase__ : Optional[int] = qiskit.QuantumCircuit(4 , 2 ) # encode inputs in qubits 0 and 1 if bita == 1: qc_ha.x(0 ) if bita == 1: qc_ha.x(1 ) qc_ha.barrier() # use cnots to write XOR of the inputs on qubit2 qc_ha.cx(0 , 2 ) qc_ha.cx(1 , 2 ) # use ccx / toffoli gate to write AND of the inputs on qubit3 qc_ha.ccx(0 , 1 , 3 ) qc_ha.barrier() # extract outputs qc_ha.measure(2 , 0 ) # extract XOR value qc_ha.measure(3 , 1 ) # extract AND value # Execute the circuit on the qasm simulator UpperCAmelCase__ : Optional[int] = qiskit.execute(snake_case , snake_case , shots=1000 ) # Return the histogram data of the results of the experiment return job.result().get_counts(snake_case ) if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = half_adder(1, 1) print(F"""Half Adder Output Qubit Counts: {counts}""")
298
1
"""simple docstring""" from __future__ import annotations # This is the precision for this function which can be altered. # It is recommended for users to keep this number greater than or equal to 10. _lowerCAmelCase : List[str] = 10 def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int , snake_case : list[int] , snake_case : int )-> int: '''simple docstring''' for i in range(snake_case , snake_case ): if array[i] == target: return i return -1 def SCREAMING_SNAKE_CASE__ ( snake_case : list[int] , snake_case : int )-> int: '''simple docstring''' UpperCAmelCase__ : List[Any] = 0 UpperCAmelCase__ : List[Any] = len(snake_case ) while left <= right: if right - left < precision: return lin_search(snake_case , snake_case , snake_case , snake_case ) UpperCAmelCase__ : Union[str, Any] = (left + right) // 3 + 1 UpperCAmelCase__ : Union[str, Any] = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: UpperCAmelCase__ : Any = one_third - 1 elif array[two_third] < target: UpperCAmelCase__ : Any = two_third + 1 else: UpperCAmelCase__ : str = one_third + 1 UpperCAmelCase__ : Union[str, Any] = two_third - 1 else: return -1 def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int , snake_case : list[int] , snake_case : int )-> int: '''simple docstring''' if left < right: if right - left < precision: return lin_search(snake_case , snake_case , snake_case , snake_case ) UpperCAmelCase__ : str = (left + right) // 3 + 1 UpperCAmelCase__ : Any = 2 * (left + right) // 3 + 1 if array[one_third] == target: return one_third elif array[two_third] == target: return two_third elif target < array[one_third]: return rec_ternary_search(snake_case , one_third - 1 , snake_case , snake_case ) elif array[two_third] < target: return rec_ternary_search(two_third + 1 , snake_case , snake_case , snake_case ) else: return rec_ternary_search(one_third + 1 , two_third - 1 , snake_case , snake_case ) else: return -1 if __name__ == "__main__": import doctest doctest.testmod() _lowerCAmelCase : Tuple = input("""Enter numbers separated by comma:\n""").strip() _lowerCAmelCase : str = [int(item.strip()) for item in user_input.split(""",""")] assert collection == sorted(collection), F"List must be ordered.\n{collection}." _lowerCAmelCase : List[str] = int(input("""Enter the number to be found in the list:\n""").strip()) _lowerCAmelCase : Optional[Any] = ite_ternary_search(collection, target) _lowerCAmelCase : Dict = rec_ternary_search(0, len(collection) - 1, collection, target) if resulta != -1: print(F"""Iterative search: {target} found at positions: {resulta}""") print(F"""Recursive search: {target} found at positions: {resulta}""") else: print("""Not found""")
298
"""simple docstring""" from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) _lowerCAmelCase : Union[str, Any] = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''efficientformer''' def __init__( self : List[Any] , snake_case__ : List[int] = [3, 2, 6, 4] , snake_case__ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , snake_case__ : List[bool] = [True, True, True, True] , snake_case__ : int = 4_4_8 , snake_case__ : int = 3_2 , snake_case__ : int = 4 , snake_case__ : int = 7 , snake_case__ : int = 5 , snake_case__ : int = 8 , snake_case__ : int = 4 , snake_case__ : float = 0.0 , snake_case__ : int = 1_6 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : float = 0.0 , snake_case__ : int = 1 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : float = 1e-5 , snake_case__ : str = "gelu" , snake_case__ : float = 0.02 , snake_case__ : float = 1e-12 , snake_case__ : int = 2_2_4 , snake_case__ : float = 1e-05 , **snake_case__ : str , ): '''simple docstring''' super().__init__(**snake_case__ ) UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : List[str] = hidden_sizes UpperCAmelCase__ : Union[str, Any] = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : List[Any] = initializer_range UpperCAmelCase__ : List[Any] = layer_norm_eps UpperCAmelCase__ : Optional[int] = patch_size UpperCAmelCase__ : Tuple = num_channels UpperCAmelCase__ : Optional[int] = depths UpperCAmelCase__ : Union[str, Any] = mlp_expansion_ratio UpperCAmelCase__ : Dict = downsamples UpperCAmelCase__ : Any = dim UpperCAmelCase__ : str = key_dim UpperCAmelCase__ : List[Any] = attention_ratio UpperCAmelCase__ : Optional[Any] = resolution UpperCAmelCase__ : Optional[Any] = pool_size UpperCAmelCase__ : Any = downsample_patch_size UpperCAmelCase__ : int = downsample_stride UpperCAmelCase__ : Dict = downsample_pad UpperCAmelCase__ : List[Any] = drop_path_rate UpperCAmelCase__ : Optional[Any] = num_metaad_blocks UpperCAmelCase__ : List[str] = distillation UpperCAmelCase__ : Dict = use_layer_scale UpperCAmelCase__ : List[Any] = layer_scale_init_value UpperCAmelCase__ : Optional[Any] = image_size UpperCAmelCase__ : Optional[int] = batch_norm_eps
298
1
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Any = logging.get_logger(__name__) _lowerCAmelCase : List[Any] = { '''vinvino02/glpn-kitti''': '''https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json''', # See all GLPN models at https://huggingface.co/models?filter=glpn } class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ): SCREAMING_SNAKE_CASE_ ='''glpn''' def __init__( self : Tuple , snake_case__ : int=3 , snake_case__ : Union[str, Any]=4 , snake_case__ : List[Any]=[2, 2, 2, 2] , snake_case__ : Dict=[8, 4, 2, 1] , snake_case__ : str=[3_2, 6_4, 1_6_0, 2_5_6] , snake_case__ : Optional[Any]=[7, 3, 3, 3] , snake_case__ : Optional[int]=[4, 2, 2, 2] , snake_case__ : int=[1, 2, 5, 8] , snake_case__ : Any=[4, 4, 4, 4] , snake_case__ : Dict="gelu" , snake_case__ : List[Any]=0.0 , snake_case__ : Any=0.0 , snake_case__ : Optional[Any]=0.02 , snake_case__ : Optional[Any]=0.1 , snake_case__ : Union[str, Any]=1e-6 , snake_case__ : str=6_4 , snake_case__ : List[str]=1_0 , snake_case__ : Dict=-1 , **snake_case__ : Union[str, Any] , ): '''simple docstring''' super().__init__(**__a ) UpperCAmelCase__ : List[Any] = num_channels UpperCAmelCase__ : Tuple = num_encoder_blocks UpperCAmelCase__ : Optional[Any] = depths UpperCAmelCase__ : Any = sr_ratios UpperCAmelCase__ : int = hidden_sizes UpperCAmelCase__ : Any = patch_sizes UpperCAmelCase__ : Optional[int] = strides UpperCAmelCase__ : Dict = mlp_ratios UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : List[Any] = hidden_act UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : Any = attention_probs_dropout_prob UpperCAmelCase__ : Optional[int] = initializer_range UpperCAmelCase__ : Union[str, Any] = drop_path_rate UpperCAmelCase__ : Any = layer_norm_eps UpperCAmelCase__ : Any = decoder_hidden_size UpperCAmelCase__ : str = max_depth UpperCAmelCase__ : List[Any] = head_in_index
350
"""simple docstring""" import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def SCREAMING_SNAKE_CASE__ ( snake_case : Dataset , snake_case : Dict[str, str] )-> Any: '''simple docstring''' UpperCAmelCase__ : str = args.log_outputs UpperCAmelCase__ : str = "_".join(args.dataset.split("/" ) + [args.config, args.split] ) # load metric UpperCAmelCase__ : List[str] = load_metric("wer" ) UpperCAmelCase__ : Tuple = load_metric("cer" ) # compute metrics UpperCAmelCase__ : List[str] = wer.compute(references=result["target"] , predictions=result["prediction"] ) UpperCAmelCase__ : Tuple = cer.compute(references=result["target"] , predictions=result["prediction"] ) # print & log results UpperCAmelCase__ : Union[str, Any] = f'WER: {wer_result}\nCER: {cer_result}' print(snake_case ) with open(f'{dataset_id}_eval_results.txt' , "w" ) as f: f.write(snake_case ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: UpperCAmelCase__ : str = f'log_{dataset_id}_predictions.txt' UpperCAmelCase__ : List[str] = f'log_{dataset_id}_targets.txt' with open(snake_case , "w" ) as p, open(snake_case , "w" ) as t: # mapping function to write output def write_to_file(snake_case : List[Any] , snake_case : List[str] ): p.write(f'{i}' + "\n" ) p.write(batch["prediction"] + "\n" ) t.write(f'{i}' + "\n" ) t.write(batch["target"] + "\n" ) result.map(snake_case , with_indices=snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> str: '''simple docstring''' UpperCAmelCase__ : str = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training UpperCAmelCase__ : str = re.sub(snake_case , "" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
UpperCAmelCase__ : Tuple = ["\n\n", "\n", " ", " "] for t in token_sequences_to_ignore: UpperCAmelCase__ : List[Any] = " ".join(text.split(snake_case ) ) return text def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor UpperCAmelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(args.model_id ) UpperCAmelCase__ : str = feature_extractor.sampling_rate # resample audio UpperCAmelCase__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case ) ) # load eval pipeline if args.device is None: UpperCAmelCase__ : List[str] = 0 if torch.cuda.is_available() else -1 UpperCAmelCase__ : Optional[int] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(snake_case : Any ): UpperCAmelCase__ : List[str] = asr( batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) UpperCAmelCase__ : List[Any] = prediction["text"] UpperCAmelCase__ : Optional[int] = normalize_text(batch["sentence"] ) return batch # run inference on all examples UpperCAmelCase__ : Dict = dataset.map(snake_case , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case , snake_case ) if __name__ == "__main__": _lowerCAmelCase : Any = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) _lowerCAmelCase : Tuple = parser.parse_args() main(args)
298
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from diffusers import ( DDIMScheduler, KandinskyVaaControlnetPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel, ) from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference enable_full_determinism() class lowerCAmelCase__ ( A__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =KandinskyVaaControlnetPipeline SCREAMING_SNAKE_CASE_ =["image_embeds", "negative_image_embeds", "hint"] SCREAMING_SNAKE_CASE_ =["image_embeds", "negative_image_embeds", "hint"] SCREAMING_SNAKE_CASE_ =[ "generator", "height", "width", "latents", "guidance_scale", "num_inference_steps", "return_dict", "guidance_scale", "num_images_per_prompt", "output_type", "return_dict", ] SCREAMING_SNAKE_CASE_ =False @property def __a ( self : List[Any] ): '''simple docstring''' return 3_2 @property def __a ( self : Tuple ): '''simple docstring''' return 3_2 @property def __a ( self : List[Any] ): '''simple docstring''' return self.time_input_dim @property def __a ( self : List[Any] ): '''simple docstring''' return self.time_input_dim * 4 @property def __a ( self : Union[str, Any] ): '''simple docstring''' return 1_0_0 @property def __a ( self : Union[str, Any] ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : Optional[int] = { """in_channels""": 8, # Out channels is double in channels because predicts mean and variance """out_channels""": 8, """addition_embed_type""": """image_hint""", """down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""), """up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""), """mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""", """block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2), """layers_per_block""": 1, """encoder_hid_dim""": self.text_embedder_hidden_size, """encoder_hid_dim_type""": """image_proj""", """cross_attention_dim""": self.cross_attention_dim, """attention_head_dim""": 4, """resnet_time_scale_shift""": """scale_shift""", """class_embed_type""": None, } UpperCAmelCase__ : Tuple = UNetaDConditionModel(**__A ) return model @property def __a ( self : Any ): '''simple docstring''' return { "block_out_channels": [3_2, 3_2, 6_4, 6_4], "down_block_types": [ "DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D", "AttnDownEncoderBlock2D", ], "in_channels": 3, "latent_channels": 4, "layers_per_block": 1, "norm_num_groups": 8, "norm_type": "spatial", "num_vq_embeddings": 1_2, "out_channels": 3, "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], "vq_embed_dim": 4, } @property def __a ( self : Any ): '''simple docstring''' torch.manual_seed(0 ) UpperCAmelCase__ : List[str] = VQModel(**self.dummy_movq_kwargs ) return model def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = self.dummy_unet UpperCAmelCase__ : List[Any] = self.dummy_movq UpperCAmelCase__ : int = DDIMScheduler( num_train_timesteps=1_0_0_0 , beta_schedule="linear" , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=__A , set_alpha_to_one=__A , steps_offset=1 , prediction_type="epsilon" , thresholding=__A , ) UpperCAmelCase__ : Optional[Any] = { """unet""": unet, """scheduler""": scheduler, """movq""": movq, } return components def __a ( 
self : List[Any] , snake_case__ : int , snake_case__ : Union[str, Any]=0 ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(__A ) ).to(__A ) UpperCAmelCase__ : Optional[Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to( __A ) # create hint UpperCAmelCase__ : Any = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__A ) ).to(__A ) if str(__A ).startswith("mps" ): UpperCAmelCase__ : List[Any] = torch.manual_seed(__A ) else: UpperCAmelCase__ : List[str] = torch.Generator(device=__A ).manual_seed(__A ) UpperCAmelCase__ : str = { """image_embeds""": image_embeds, """negative_image_embeds""": negative_image_embeds, """hint""": hint, """generator""": generator, """height""": 6_4, """width""": 6_4, """guidance_scale""": 4.0, """num_inference_steps""": 2, """output_type""": """np""", } return inputs def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Dict = """cpu""" UpperCAmelCase__ : int = self.get_dummy_components() UpperCAmelCase__ : Tuple = self.pipeline_class(**__A ) UpperCAmelCase__ : Union[str, Any] = pipe.to(__A ) pipe.set_progress_bar_config(disable=__A ) UpperCAmelCase__ : Optional[int] = pipe(**self.get_dummy_inputs(__A ) ) UpperCAmelCase__ : Union[str, Any] = output.images UpperCAmelCase__ : int = pipe( **self.get_dummy_inputs(__A ) , return_dict=__A , )[0] UpperCAmelCase__ : Optional[Any] = image[0, -3:, -3:, -1] UpperCAmelCase__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1] assert image.shape == (1, 6_4, 6_4, 3) UpperCAmelCase__ : Optional[Any] = np.array( [0.695_9826, 0.86_8279, 0.755_8092, 0.6876_9467, 0.8580_5804, 0.6597_7496, 0.4488_5302, 0.595_9111, 0.425_1595] ) assert ( np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 ), f' expected_slice {expected_slice}, but got {image_slice.flatten()}' assert ( np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2 ), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}' @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : Tuple ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy" ) UpperCAmelCase__ : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinskyv22/hint_image_cat.png" ) UpperCAmelCase__ : Optional[Any] = torch.from_numpy(np.array(__A ) ).float() / 2_5_5.0 UpperCAmelCase__ : str = hint.permute(2 , 0 , 1 ).unsqueeze(0 ) UpperCAmelCase__ : List[str] = KandinskyVaaPriorPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-prior" , torch_dtype=torch.floataa ) pipe_prior.to(__A ) UpperCAmelCase__ : List[str] = KandinskyVaaControlnetPipeline.from_pretrained( "kandinsky-community/kandinsky-2-2-controlnet-depth" , torch_dtype=torch.floataa ) UpperCAmelCase__ : Optional[Any] = pipeline.to(__A ) pipeline.set_progress_bar_config(disable=__A ) UpperCAmelCase__ : Dict = """A robot, 4k photo""" UpperCAmelCase__ : Union[str, Any] = torch.Generator(device="cuda" ).manual_seed(0 ) UpperCAmelCase__ : Optional[Any] = pipe_prior( __A , generator=__A , num_inference_steps=5 , negative_prompt="" , ).to_tuple() UpperCAmelCase__ : Tuple = 
torch.Generator(device="cuda" ).manual_seed(0 ) UpperCAmelCase__ : Dict = pipeline( image_embeds=__A , negative_image_embeds=__A , hint=__A , generator=__A , num_inference_steps=1_0_0 , output_type="np" , ) UpperCAmelCase__ : Any = output.images[0] assert image.shape == (5_1_2, 5_1_2, 3) assert_mean_pixel_difference(__A , __A )
351
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowerCAmelCase__ : def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str=1_0_0 , snake_case__ : str=1_3 , snake_case__ : Optional[int]=3_0 , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]=True , snake_case__ : Any=3_2 , snake_case__ : List[str]=4 , snake_case__ : Any=4 , snake_case__ : Dict=3_7 , snake_case__ : str="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[Any]=1_0 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : Tuple=None , snake_case__ : Tuple=[0, 1, 2, 3] , ): '''simple docstring''' UpperCAmelCase__ : int = parent UpperCAmelCase__ : List[str] = 1_0_0 UpperCAmelCase__ : List[Any] = batch_size UpperCAmelCase__ : int = image_size UpperCAmelCase__ : List[Any] = patch_size UpperCAmelCase__ : List[Any] = num_channels UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : str = use_labels UpperCAmelCase__ : Any = hidden_size UpperCAmelCase__ : Dict = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Any = hidden_act UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : str = attention_probs_dropout_prob UpperCAmelCase__ : Optional[int] = type_sequence_label_size UpperCAmelCase__ : Any = initializer_range UpperCAmelCase__ : Any = scope UpperCAmelCase__ : Optional[Any] = out_indices UpperCAmelCase__ : int = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase__ : List[Any] = (image_size // patch_size) ** 2 UpperCAmelCase__ : Optional[int] = num_patches + 1 def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : str = None UpperCAmelCase__ : Optional[int] = None if self.use_labels: UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) UpperCAmelCase__ : Tuple = self.get_config() return config, pixel_values, labels, pixel_labels def __a ( self : int ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads 
, intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def __a ( self : int , snake_case__ : str , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Any ): '''simple docstring''' UpperCAmelCase__ : int = BeitForMaskedImageModeling(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[Any] = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def __a ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase__ : Any = 1 UpperCAmelCase__ : List[Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase__ : Optional[Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.num_labels UpperCAmelCase__ : int = BeitForSemanticSegmentation(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = model(snake_case__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = config_and_inputs UpperCAmelCase__ : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( { '''feature-extraction''': BeitModel, '''image-classification''': BeitForImageClassification, '''image-segmentation''': BeitForSemanticSegmentation, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False 
SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitModelTester(self ) UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 ) def __a ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def __a ( self : List[Any] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def __a ( self : List[str] ): '''simple docstring''' pass def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Dict = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase__ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(snake_case__ ) UpperCAmelCase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : str = [*signature.parameters.keys()] UpperCAmelCase__ : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' if not self.model_tester.is_training: return UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Optional[int] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]: continue UpperCAmelCase__ : Optional[Any] = model_class(snake_case__ ) model.to(snake_case__ ) model.train() UpperCAmelCase__ : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Tuple = model(**snake_case__ ).loss loss.backward() def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : List[str] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(snake_case__ ), 
BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue UpperCAmelCase__ : List[Any] = model_class(snake_case__ ) model.gradient_checkpointing_enable() model.to(snake_case__ ) model.train() UpperCAmelCase__ : Dict = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Optional[Any] = model(**snake_case__ ).loss loss.backward() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Union[str, Any] = _config_zero_init(snake_case__ ) for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(config=snake_case__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) @slow def __a ( self : Any ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Optional[Any] = BeitModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def __a ( self : Union[str, Any] ): '''simple docstring''' return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(snake_case__ ) UpperCAmelCase__ : int = self.default_image_processor UpperCAmelCase__ : List[Any] = prepare_img() UpperCAmelCase__ : Dict = image_processor(images=snake_case__ , return_tensors="pt" ).pixel_values.to(snake_case__ ) # prepare bool_masked_pos UpperCAmelCase__ : Union[str, Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ ) UpperCAmelCase__ : str = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 1_9_6, 8_1_9_2) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Any = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) ) @slow def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Dict = prepare_img() UpperCAmelCase__ : Tuple = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Union[str, Any] = model(**snake_case__ ) UpperCAmelCase__ : Any = outputs.logits # verify the logits UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1_0_0_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Optional[Any] 
= torch.tensor([-1.2385, -1.0987, -1.0108] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : List[str] = 2_8_1 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Any = prepare_img() UpperCAmelCase__ : Union[str, Any] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[Any] = model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 2_1_8_4_1) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : int = torch.tensor([1.6881, -0.2787, 0.5901] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : Any = 2_3_9_6 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : List[Any] = model.to(snake_case__ ) UpperCAmelCase__ : int = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Any = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : List[Any] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : str = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[str] = model(**snake_case__ ) UpperCAmelCase__ : Dict = outputs.logits # verify the logits UpperCAmelCase__ : Any = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : List[str] = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: UpperCAmelCase__ : Optional[Any] = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=snake_case__ , ) else: UpperCAmelCase__ : int = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=snake_case__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 ) ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : Any = model.to(snake_case__ ) UpperCAmelCase__ : Dict = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : Optional[int] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : Optional[int] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] 
= model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits.detach().cpu() UpperCAmelCase__ : str = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(5_0_0, 3_0_0)] ) UpperCAmelCase__ : List[Any] = torch.Size((5_0_0, 3_0_0) ) self.assertEqual(segmentation[0].shape , snake_case__ ) UpperCAmelCase__ : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ ) UpperCAmelCase__ : int = torch.Size((1_6_0, 1_6_0) ) self.assertEqual(segmentation[0].shape , snake_case__ )
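# Usage sketch, not part of the test suite above: a hedged example of running
# BEiT semantic segmentation end to end, mirroring the slow test. The checkpoint
# name and processor settings come from the test itself; the input path
# "scene.jpg" is an illustrative assumption, and downloading the weights
# requires network access.
import torch
from PIL import Image

from transformers import BeitForSemanticSegmentation, BeitImageProcessor


def run_beit_segmentation(image_path="scene.jpg" ):
    model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
    image_processor = BeitImageProcessor(do_resize=True , size=6_4_0 , do_center_crop=False )
    image = Image.open(image_path )
    inputs = image_processor(images=image , return_tensors="pt" )
    with torch.no_grad():
        outputs = model(**inputs )  # logits: (1, 150, 160, 160) for this checkpoint
    # resize the logits back to the original (height, width) of the input image
    segmentation = image_processor.post_process_semantic_segmentation(
        outputs=outputs , target_sizes=[image.size[::-1]] )
    return segmentation[0]  # per-pixel ADE20k class indices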
"""simple docstring""" import random import unittest import torch from diffusers import IFInpaintingPipeline from diffusers.utils import floats_tensor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import skip_mps, torch_device from ..pipeline_params import ( TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS, ) from ..test_pipelines_common import PipelineTesterMixin from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =IFInpaintingPipeline SCREAMING_SNAKE_CASE_ =TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"width", "height"} SCREAMING_SNAKE_CASE_ =TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS SCREAMING_SNAKE_CASE_ =PipelineTesterMixin.required_optional_params - {"latents"} def __a ( self : List[str] ): '''simple docstring''' return self._get_dummy_components() def __a ( self : Optional[Any] , snake_case__ : List[str] , snake_case__ : int=0 ): '''simple docstring''' if str(_UpperCAmelCase ).startswith("mps" ): UpperCAmelCase__ : Dict = torch.manual_seed(_UpperCAmelCase ) else: UpperCAmelCase__ : Optional[int] = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase ) UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase ) UpperCAmelCase__ : List[str] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase ) UpperCAmelCase__ : str = { '''prompt''': '''A painting of a squirrel eating a burger''', '''image''': image, '''mask_image''': mask_image, '''generator''': generator, '''num_inference_steps''': 2, '''output_type''': '''numpy''', } return inputs @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __a ( self : str ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) def __a ( self : Dict ): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def __a ( self : Tuple ): '''simple docstring''' # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def __a ( self : Optional[int] ): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def __a ( self : Union[str, Any] ): '''simple docstring''' self._test_save_load_local() def __a ( self : Dict ): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1e-2 , )
"""simple docstring""" import functools def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str )-> int: '''simple docstring''' UpperCAmelCase__ : List[str] = len(snake_case ) UpperCAmelCase__ : str = len(snake_case ) @functools.cache def min_distance(snake_case : int , snake_case : int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa UpperCAmelCase__ : Optional[int] = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , snake_case ) , 1 + min_distance(snake_case , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCAmelCase__ ( metaclass=A__ ): SCREAMING_SNAKE_CASE_ =['''flax'''] def __init__( self : int , *snake_case__ : Optional[Any] , **snake_case__ : Optional[Any] ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : Any , *snake_case__ : Union[str, Any] , **snake_case__ : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : Optional[Any] , *snake_case__ : str , **snake_case__ : str ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=A__ ): SCREAMING_SNAKE_CASE_ =['''flax'''] def __init__( self : str , *snake_case__ : Any , **snake_case__ : Union[str, Any] ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : Dict , *snake_case__ : Tuple , **snake_case__ : Dict ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : Optional[int] , *snake_case__ : Union[str, Any] , **snake_case__ : Any ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=A__ ): SCREAMING_SNAKE_CASE_ =['''flax'''] def __init__( self : Dict , *snake_case__ : List[str] , **snake_case__ : Dict ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : Optional[Any] , *snake_case__ : Dict , **snake_case__ : Dict ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : Optional[Any] , *snake_case__ : List[str] , **snake_case__ : Tuple ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=A__ ): SCREAMING_SNAKE_CASE_ =['''flax'''] def __init__( self : str , *snake_case__ : str , **snake_case__ : Union[str, Any] ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : Tuple , *snake_case__ : Union[str, Any] , **snake_case__ : Tuple ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : List[str] , *snake_case__ : Dict , **snake_case__ : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=A__ ): SCREAMING_SNAKE_CASE_ =['''flax'''] def __init__( self : Union[str, Any] , *snake_case__ : Optional[Any] , **snake_case__ : Any ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : Union[str, Any] , *snake_case__ : int , **snake_case__ : Any ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : Optional[int] , *snake_case__ : List[Any] , **snake_case__ : Tuple ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=A__ ): SCREAMING_SNAKE_CASE_ =['''flax'''] def __init__( self : Dict , *snake_case__ : Optional[Any] , **snake_case__ : Optional[int] ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : Dict , *snake_case__ : Dict , **snake_case__ : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : List[Any] , *snake_case__ : List[str] , **snake_case__ : str ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=A__ ): SCREAMING_SNAKE_CASE_ =['''flax'''] def __init__( self : Optional[int] , *snake_case__ : Dict , **snake_case__ : Dict ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : Tuple , *snake_case__ : 
Optional[Any] , **snake_case__ : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : Optional[int] , *snake_case__ : Dict , **snake_case__ : Dict ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=A__ ): SCREAMING_SNAKE_CASE_ =['''flax'''] def __init__( self : List[str] , *snake_case__ : int , **snake_case__ : Tuple ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : List[str] , *snake_case__ : Union[str, Any] , **snake_case__ : Tuple ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : str , *snake_case__ : Dict , **snake_case__ : Dict ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=A__ ): SCREAMING_SNAKE_CASE_ =['''flax'''] def __init__( self : Optional[int] , *snake_case__ : Optional[Any] , **snake_case__ : List[str] ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : List[Any] , *snake_case__ : Union[str, Any] , **snake_case__ : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : int , *snake_case__ : Optional[Any] , **snake_case__ : int ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=A__ ): SCREAMING_SNAKE_CASE_ =['''flax'''] def __init__( self : List[str] , *snake_case__ : List[str] , **snake_case__ : str ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : Dict , *snake_case__ : List[Any] , **snake_case__ : List[str] ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : str , *snake_case__ : List[str] , **snake_case__ : Optional[int] ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=A__ ): SCREAMING_SNAKE_CASE_ =['''flax'''] def __init__( self : Optional[int] , *snake_case__ : Any , **snake_case__ : str ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : Tuple , *snake_case__ : Tuple , **snake_case__ : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : int , *snake_case__ : List[str] , **snake_case__ : Any ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=A__ ): SCREAMING_SNAKE_CASE_ =['''flax'''] def __init__( self : str , *snake_case__ : Dict , **snake_case__ : Optional[int] ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : str , *snake_case__ : Optional[int] , **snake_case__ : Tuple ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : List[str] , *snake_case__ : Optional[int] , **snake_case__ : Tuple ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=A__ ): SCREAMING_SNAKE_CASE_ =['''flax'''] def __init__( self : Union[str, Any] , *snake_case__ : Tuple , **snake_case__ : Dict ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : Union[str, Any] , *snake_case__ : Optional[int] , **snake_case__ : Dict ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : List[Any] , *snake_case__ : int , **snake_case__ : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["flax"] )
"""simple docstring""" import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class lowerCAmelCase__ ( __magic_name__ ): def __a ( self : List[Any] , snake_case__ : str ): '''simple docstring''' with open(snake_case__ , encoding="utf-8" ) as input_file: UpperCAmelCase__ : List[Any] = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" ) UpperCAmelCase__ : Tuple = input_file.read() UpperCAmelCase__ : Tuple = regexp.search(snake_case__ ) return match def __a ( self : List[str] , snake_case__ : str ): '''simple docstring''' with open(snake_case__ , encoding="utf-8" ) as input_file: UpperCAmelCase__ : Union[str, Any] = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL ) UpperCAmelCase__ : Dict = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` UpperCAmelCase__ : int = regexp.finditer(snake_case__ ) UpperCAmelCase__ : Dict = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = Path("./datasets" ) UpperCAmelCase__ : Any = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(snake_case__ ) ): raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}' ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = Path("./datasets" ) UpperCAmelCase__ : int = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_print_statements(str(snake_case__ ) ): raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.' )
"""simple docstring""" from ... import PretrainedConfig _lowerCAmelCase : Any = { """sijunhe/nezha-cn-base""": """https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json""", } class lowerCAmelCase__ ( a__ ): SCREAMING_SNAKE_CASE_ =NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP SCREAMING_SNAKE_CASE_ ='''nezha''' def __init__( self : Tuple , snake_case__ : int=2_1_1_2_8 , snake_case__ : Optional[Any]=7_6_8 , snake_case__ : Dict=1_2 , snake_case__ : List[str]=1_2 , snake_case__ : int=3_0_7_2 , snake_case__ : List[str]="gelu" , snake_case__ : Optional[int]=0.1 , snake_case__ : Any=0.1 , snake_case__ : List[Any]=5_1_2 , snake_case__ : Optional[Any]=6_4 , snake_case__ : Any=2 , snake_case__ : int=0.02 , snake_case__ : List[Any]=1e-12 , snake_case__ : Optional[Any]=0.1 , snake_case__ : Tuple=0 , snake_case__ : Dict=2 , snake_case__ : List[str]=3 , snake_case__ : Union[str, Any]=True , **snake_case__ : Optional[Any] , ): '''simple docstring''' super().__init__(pad_token_id=_lowerCamelCase , bos_token_id=_lowerCamelCase , eos_token_id=_lowerCamelCase , **_lowerCamelCase ) UpperCAmelCase__ : Dict = vocab_size UpperCAmelCase__ : int = hidden_size UpperCAmelCase__ : str = num_hidden_layers UpperCAmelCase__ : Optional[int] = num_attention_heads UpperCAmelCase__ : List[Any] = hidden_act UpperCAmelCase__ : Optional[Any] = intermediate_size UpperCAmelCase__ : int = hidden_dropout_prob UpperCAmelCase__ : Union[str, Any] = attention_probs_dropout_prob UpperCAmelCase__ : Optional[int] = max_position_embeddings UpperCAmelCase__ : List[str] = max_relative_position UpperCAmelCase__ : Union[str, Any] = type_vocab_size UpperCAmelCase__ : Dict = initializer_range UpperCAmelCase__ : Tuple = layer_norm_eps UpperCAmelCase__ : Optional[Any] = classifier_dropout UpperCAmelCase__ : int = use_cache
"""simple docstring""" import numpy as np import datasets _lowerCAmelCase : Optional[int] = """ Compute the Mahalanobis Distance Mahalonobis distance is the distance between a point and a distribution. And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance. It was introduced by Prof. P. C. Mahalanobis in 1936 and has been used in various statistical applications ever since [source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/] """ _lowerCAmelCase : Tuple = """\ @article{de2000mahalanobis, title={The mahalanobis distance}, author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L}, journal={Chemometrics and intelligent laboratory systems}, volume={50}, number={1}, pages={1--18}, year={2000}, publisher={Elsevier} } """ _lowerCAmelCase : Optional[int] = """ Args: X: List of datapoints to be compared with the `reference_distribution`. reference_distribution: List of datapoints from the reference distribution we want to compare to. Returns: mahalanobis: The Mahalonobis distance for each datapoint in `X`. Examples: >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\") >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]]) >>> print(results) {'mahalanobis': array([0.5])} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): def __a ( self : Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ), } ) , ) def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any ): '''simple docstring''' # convert to numpy arrays UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError("Expected `X` to be a 2D vector" ) if len(reference_distribution.shape ) != 2: raise ValueError("Expected `reference_distribution` to be a 2D vector" ) if reference_distribution.shape[0] < 2: raise ValueError( "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" ) # Get mahalanobis distance for each prediction UpperCAmelCase__ : Optional[Any] = X - np.mean(snake_case__ ) UpperCAmelCase__ : Tuple = np.cov(reference_distribution.T ) try: UpperCAmelCase__ : str = np.linalg.inv(snake_case__ ) except np.linalg.LinAlgError: UpperCAmelCase__ : Optional[Any] = np.linalg.pinv(snake_case__ ) UpperCAmelCase__ : List[Any] = np.dot(snake_case__ , snake_case__ ) UpperCAmelCase__ : Tuple = np.dot(snake_case__ , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
"""simple docstring""" from timeit import timeit def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> List[str]: '''simple docstring''' if number < 0: raise ValueError("the value of input must not be negative" ) UpperCAmelCase__ : Tuple = 0 while number: number &= number - 1 result += 1 return result def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> Any: '''simple docstring''' if number < 0: raise ValueError("the value of input must not be negative" ) UpperCAmelCase__ : Optional[int] = 0 while number: if number % 2 == 1: result += 1 number >>= 1 return result def SCREAMING_SNAKE_CASE__ ( )-> Optional[int]: '''simple docstring''' def do_benchmark(snake_case : int ) -> None: UpperCAmelCase__ : Dict = 'import __main__ as z' print(f'Benchmark when {number = }:' ) print(f'{get_set_bits_count_using_modulo_operator(__a ) = }' ) UpperCAmelCase__ : Tuple = timeit("z.get_set_bits_count_using_modulo_operator(25)" , setup=__a ) print(f'timeit() runs in {timing} seconds' ) print(f'{get_set_bits_count_using_brian_kernighans_algorithm(__a ) = }' ) UpperCAmelCase__ : Dict = timeit( "z.get_set_bits_count_using_brian_kernighans_algorithm(25)" , setup=__a , ) print(f'timeit() runs in {timing} seconds' ) for number in (25, 37, 58, 0): do_benchmark(__a ) print() if __name__ == "__main__": import doctest doctest.testmod() benchmark()
"""simple docstring""" import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =IFPipeline SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_BATCH_PARAMS SCREAMING_SNAKE_CASE_ =PipelineTesterMixin.required_optional_params - {'''latents'''} def __a ( self : Dict ): '''simple docstring''' return self._get_dummy_components() def __a ( self : Any , snake_case__ : Dict , snake_case__ : Optional[Any]=0 ): '''simple docstring''' if str(snake_case__ ).startswith("mps" ): UpperCAmelCase__ : str = torch.manual_seed(snake_case__ ) else: UpperCAmelCase__ : Optional[int] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) UpperCAmelCase__ : Tuple = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def __a ( self : Tuple ): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def __a ( self : Tuple ): '''simple docstring''' # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def __a ( self : Dict ): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def __a ( self : int ): '''simple docstring''' self._test_save_load_local() def __a ( self : Any ): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1e-2 , ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __a ( self : Optional[Any] ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : str ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self : Tuple ): '''simple docstring''' # if UpperCAmelCase__ : Any = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa ) UpperCAmelCase__ : Union[str, Any] = IFSuperResolutionPipeline.from_pretrained( "DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=snake_case__ , tokenizer=snake_case__ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("cuda" ) UpperCAmelCase__ , UpperCAmelCase__ : Any = pipe_a.encode_prompt("anime turtle" , device="cuda" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() UpperCAmelCase__ : Tuple = None UpperCAmelCase__ : List[Any] = None pipe_a.enable_model_cpu_offload() 
pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img UpperCAmelCase__ : List[str] = IFImgaImgPipeline(**pipe_a.components ) UpperCAmelCase__ : List[str] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting UpperCAmelCase__ : List[str] = IFInpaintingPipeline(**pipe_a.components ) UpperCAmelCase__ : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def __a ( self : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[Any] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Dict = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : List[Any] = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_3 * 1_0**9 UpperCAmelCase__ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : str = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Union[str, Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def __a ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Tuple = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : str = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : 
Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 UpperCAmelCase__ : List[str] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Dict = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Optional[Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[int] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(snake_case__ ) UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : int = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : int = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : Union[str, Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 UpperCAmelCase__ : int = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Tuple = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Any: '''simple docstring''' 
torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
"""simple docstring""" from collections import OrderedDict from typing import Mapping from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging _lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { """xlm-roberta-base""": """https://huggingface.co/xlm-roberta-base/resolve/main/config.json""", """xlm-roberta-large""": """https://huggingface.co/xlm-roberta-large/resolve/main/config.json""", """xlm-roberta-large-finetuned-conll02-dutch""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json""" ), """xlm-roberta-large-finetuned-conll02-spanish""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json""" ), """xlm-roberta-large-finetuned-conll03-english""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json""" ), """xlm-roberta-large-finetuned-conll03-german""": ( """https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json""" ), } class lowerCAmelCase__ ( lowerCamelCase__ ): SCREAMING_SNAKE_CASE_ ='xlm-roberta' def __init__( self : List[Any] , snake_case__ : str=3_0_5_2_2 , snake_case__ : str=7_6_8 , snake_case__ : List[str]=1_2 , snake_case__ : Union[str, Any]=1_2 , snake_case__ : Dict=3_0_7_2 , snake_case__ : Optional[Any]="gelu" , snake_case__ : Any=0.1 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : Tuple=5_1_2 , snake_case__ : Optional[Any]=2 , snake_case__ : List[str]=0.02 , snake_case__ : Union[str, Any]=1e-12 , snake_case__ : Union[str, Any]=1 , snake_case__ : Optional[int]=0 , snake_case__ : List[Any]=2 , snake_case__ : List[Any]="absolute" , snake_case__ : Union[str, Any]=True , snake_case__ : Dict=None , **snake_case__ : Optional[int] , ): '''simple docstring''' super().__init__(pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ , **snake_case__ ) UpperCAmelCase__ : Optional[int] = vocab_size UpperCAmelCase__ : Optional[int] = hidden_size UpperCAmelCase__ : str = num_hidden_layers UpperCAmelCase__ : List[Any] = num_attention_heads UpperCAmelCase__ : Any = hidden_act UpperCAmelCase__ : List[Any] = intermediate_size UpperCAmelCase__ : List[str] = hidden_dropout_prob UpperCAmelCase__ : List[str] = attention_probs_dropout_prob UpperCAmelCase__ : List[str] = max_position_embeddings UpperCAmelCase__ : Any = type_vocab_size UpperCAmelCase__ : int = initializer_range UpperCAmelCase__ : str = layer_norm_eps UpperCAmelCase__ : str = position_embedding_type UpperCAmelCase__ : str = use_cache UpperCAmelCase__ : Optional[Any] = classifier_dropout class lowerCAmelCase__ ( lowerCamelCase__ ): @property def __a ( self : int ): '''simple docstring''' if self.task == "multiple-choice": UpperCAmelCase__ : List[Any] = {0: "batch", 1: "choice", 2: "sequence"} else: UpperCAmelCase__ : Optional[Any] = {0: "batch", 1: "sequence"} return OrderedDict( [ ("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ] )
"""simple docstring""" import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { """vocab_file""": """vocab.txt""", """merges_file""": """bpe.codes""", } _lowerCAmelCase : List[Any] = { """vocab_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""", }, """merges_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""", }, } _lowerCAmelCase : int = { """vinai/phobert-base""": 256, """vinai/phobert-large""": 256, } def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = set() UpperCAmelCase__ : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCAmelCase__ : Dict = char UpperCAmelCase__ : Tuple = set(snake_case ) return pairs class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : List[Any] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Tuple="<s>" , snake_case__ : List[Any]="</s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : Union[str, Any]="<s>" , snake_case__ : Any="<unk>" , snake_case__ : int="<pad>" , snake_case__ : List[str]="<mask>" , **snake_case__ : Optional[int] , ): '''simple docstring''' super().__init__( bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , ) UpperCAmelCase__ : Dict = vocab_file UpperCAmelCase__ : Tuple = merges_file UpperCAmelCase__ : List[Any] = {} UpperCAmelCase__ : Dict = 0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : Dict = 2 UpperCAmelCase__ : Dict = 3 self.add_from_file(snake_case__ ) UpperCAmelCase__ : Optional[Any] = {v: k for k, v in self.encoder.items()} with open(snake_case__ , encoding="utf-8" ) as merges_handle: UpperCAmelCase__ : Tuple = merges_handle.read().split("\n" )[:-1] UpperCAmelCase__ : Optional[Any] = [tuple(merge.split()[:-1] ) for merge in merges] UpperCAmelCase__ : List[Any] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase__ : Dict = {} def __a ( self : int , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] UpperCAmelCase__ : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __a ( self : List[str] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) if token_ids_a is None: return [1] + ([0] * len(snake_case__ )) + [1] return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1] def __a ( self : Union[str, Any] , 
snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase__ : Tuple = [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __a ( self : List[str] ): '''simple docstring''' return len(self.encoder ) def __a ( self : Any ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def __a ( self : Dict , snake_case__ : Tuple ): '''simple docstring''' if token in self.cache: return self.cache[token] UpperCAmelCase__ : Optional[Any] = tuple(snake_case__ ) UpperCAmelCase__ : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) UpperCAmelCase__ : Any = get_pairs(snake_case__ ) if not pairs: return token while True: UpperCAmelCase__ : List[Any] = min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break UpperCAmelCase__ , UpperCAmelCase__ : Tuple = bigram UpperCAmelCase__ : Optional[Any] = [] UpperCAmelCase__ : Tuple = 0 while i < len(snake_case__ ): try: UpperCAmelCase__ : Union[str, Any] = word.index(snake_case__ , snake_case__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCAmelCase__ : Dict = j if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCAmelCase__ : Dict = tuple(snake_case__ ) UpperCAmelCase__ : List[Any] = new_word if len(snake_case__ ) == 1: break else: UpperCAmelCase__ : Dict = get_pairs(snake_case__ ) UpperCAmelCase__ : List[Any] = "@@ ".join(snake_case__ ) UpperCAmelCase__ : Optional[int] = word[:-4] UpperCAmelCase__ : Union[str, Any] = word return word def __a ( self : List[Any] , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : int = re.findall(R"\S+\n?" 
, snake_case__ ) for token in words: split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) ) return split_tokens def __a ( self : Dict , snake_case__ : List[str] ): '''simple docstring''' return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) ) def __a ( self : List[Any] , snake_case__ : Any ): '''simple docstring''' return self.decoder.get(snake_case__ , self.unk_token ) def __a ( self : str , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = " ".join(snake_case__ ).replace("@@ " , "" ).strip() return out_string def __a ( self : Any , snake_case__ : str , snake_case__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(snake_case__ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return UpperCAmelCase__ : Tuple = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : str = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) if os.path.abspath(self.merges_file ) != os.path.abspath(snake_case__ ): copyfile(self.merges_file , snake_case__ ) return out_vocab_file, out_merge_file def __a ( self : List[Any] , snake_case__ : Union[str, Any] ): '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): try: with open(snake_case__ , "r" , encoding="utf-8" ) as fd: self.add_from_file(snake_case__ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' ) return UpperCAmelCase__ : Dict = f.readlines() for lineTmp in lines: UpperCAmelCase__ : Optional[int] = lineTmp.strip() UpperCAmelCase__ : Tuple = line.rfind(" " ) if idx == -1: raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" ) UpperCAmelCase__ : Any = line[:idx] UpperCAmelCase__ : str = len(self.encoder )
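# A self-contained restatement of the BPE pairing helper above (the module names
# it SCREAMING_SNAKE_CASE__ in this dump): adjacent symbol pairs are collected
# into a set, which `bpe` then merges greedily in order of merge rank.
def get_pairs(word ):
    # word is a tuple of symbols, e.g. ("l", "o", "w", "</w>")
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs


assert get_pairs(("l", "o", "w", "</w>") ) == {("l", "o"), ("o", "w"), ("w", "</w>")}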
"""simple docstring""" import pickle import unittest import torch from accelerate import Accelerator from accelerate.state import AcceleratorState from accelerate.test_utils import require_cpu @require_cpu class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = torch.nn.Linear(1_0 , 1_0 ) UpperCAmelCase__ : Any = torch.optim.SGD(model.parameters() , 0.1 ) UpperCAmelCase__ : Union[str, Any] = Accelerator() UpperCAmelCase__ : Any = accelerator.prepare(_snake_case ) try: pickle.loads(pickle.dumps(_snake_case ) ) except Exception as e: self.fail(f'Accelerated optimizer pickling failed with {e}' ) AcceleratorState._reset_state()
"""simple docstring""" # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class lowerCAmelCase__ : SCREAMING_SNAKE_CASE_ =42 # setable values SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =None @classmethod def __a ( cls : Optional[int] , snake_case__ : CommonSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray ): '''simple docstring''' return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ ) @dataclass class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =42 class lowerCAmelCase__ ( __magic_name__ , __magic_name__ ): SCREAMING_SNAKE_CASE_ =[e.name for e in FlaxKarrasDiffusionSchedulers] SCREAMING_SNAKE_CASE_ =42 @property def __a ( self : Union[str, Any] ): '''simple docstring''' return True @register_to_config def __init__( self : Tuple , snake_case__ : int = 1_0_0_0 , snake_case__ : float = 0.0001 , snake_case__ : float = 0.02 , snake_case__ : str = "linear" , snake_case__ : Optional[jnp.ndarray] = None , snake_case__ : str = "fixed_small" , snake_case__ : bool = True , snake_case__ : str = "epsilon" , snake_case__ : jnp.dtype = jnp.floataa , ): '''simple docstring''' UpperCAmelCase__ : Tuple = dtype def __a ( self : Any , snake_case__ : Optional[CommonSchedulerState] = None ): '''simple docstring''' if common is None: UpperCAmelCase__ : Any = CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype ) UpperCAmelCase__ : Optional[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1] return DDPMSchedulerState.create( common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , ) def __a ( self : int , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : Optional[int] = None ): '''simple docstring''' return sample def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Tuple = () ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 UpperCAmelCase__ : Tuple = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=snake_case__ , timesteps=snake_case__ , ) def __a ( self : List[str] , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Any=None , snake_case__ : Union[str, Any]=None ): '''simple docstring''' UpperCAmelCase__ : int = state.common.alphas_cumprod[t] UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample UpperCAmelCase__ : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: UpperCAmelCase__ : Union[str, 
Any] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": UpperCAmelCase__ : int = jnp.clip(snake_case__ , a_min=1e-20 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": UpperCAmelCase__ : Union[str, Any] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) ) elif variance_type == "fixed_large": UpperCAmelCase__ : List[Any] = state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] ) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": UpperCAmelCase__ : List[str] = variance UpperCAmelCase__ : Optional[Any] = state.common.betas[t] UpperCAmelCase__ : Any = (predicted_variance + 1) / 2 UpperCAmelCase__ : Dict = frac * max_log + (1 - frac) * min_log return variance def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : int , snake_case__ : jnp.ndarray , snake_case__ : Optional[jax.random.KeyArray] = None , snake_case__ : bool = True , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = timestep if key is None: UpperCAmelCase__ : Optional[int] = jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 ) else: UpperCAmelCase__ : int = None # 1. compute alphas, betas UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t] UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) UpperCAmelCase__ : List[str] = 1 - alpha_prod_t UpperCAmelCase__ : List[str] = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": UpperCAmelCase__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": UpperCAmelCase__ : List[Any] = model_output elif self.config.prediction_type == "v_prediction": UpperCAmelCase__ : int = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` ' " for the FlaxDDPMScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: UpperCAmelCase__ : Optional[Any] = jnp.clip(snake_case__ , -1 , 1 ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t UpperCAmelCase__ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise def random_variance(): UpperCAmelCase__ : List[str] = jax.random.split(snake_case__ , num=1 ) UpperCAmelCase__ : List[str] = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype ) return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) ) UpperCAmelCase__ : Optional[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ ) def __a ( self : List[Any] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ): '''simple docstring''' return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ ) def __a ( self : Optional[int] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ): '''simple docstring''' return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ ) def __len__( self : Union[str, Any] ): '''simple docstring''' return self.config.num_train_timesteps
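# Hedged usage sketch of the scheduler above (exported by diffusers as
# FlaxDDPMScheduler; the zero "model output" below is an assumption standing in
# for a real Flax UNet). It shows the functional create_state -> set_timesteps ->
# step loop that the Flax API requires, threading the PRNG key explicitly.
import jax
import jax.numpy as jnp

from diffusers import FlaxDDPMScheduler

scheduler = FlaxDDPMScheduler(num_train_timesteps=1_0_0_0 )
state = scheduler.create_state()
state = scheduler.set_timesteps(state , num_inference_steps=1_0 )

key = jax.random.PRNGKey(0 )
sample = jax.random.normal(key , (1, 6_4, 6_4, 3) ) * state.init_noise_sigma

for t in state.timesteps:
    model_output = jnp.zeros_like(sample )  # a real pipeline calls the UNet here
    key, step_key = jax.random.split(key )
    sample, state = scheduler.step(state , model_output , t , sample , key=step_key , return_dict=False )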
"""simple docstring""" from typing import Optional import pyspark from .. import Features, NamedSplit from ..download import DownloadMode from ..packaged_modules.spark.spark import Spark from .abc import AbstractDatasetReader class lowerCAmelCase__ ( lowercase__ ): def __init__( self : int , snake_case__ : pyspark.sql.DataFrame , snake_case__ : Optional[NamedSplit] = None , snake_case__ : Optional[Features] = None , snake_case__ : bool = True , snake_case__ : str = None , snake_case__ : bool = False , snake_case__ : str = None , snake_case__ : bool = True , snake_case__ : str = "arrow" , **snake_case__ : str , ): '''simple docstring''' super().__init__( split=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , keep_in_memory=_UpperCamelCase , streaming=_UpperCamelCase , **_UpperCamelCase , ) UpperCAmelCase__ : Optional[Any] = load_from_cache_file UpperCAmelCase__ : Dict = file_format UpperCAmelCase__ : Tuple = Spark( df=_UpperCamelCase , features=_UpperCamelCase , cache_dir=_UpperCamelCase , working_dir=_UpperCamelCase , **_UpperCamelCase , ) def __a ( self : str ): '''simple docstring''' if self.streaming: return self.builder.as_streaming_dataset(split=self.split ) UpperCAmelCase__ : Union[str, Any] = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD self.builder.download_and_prepare( download_mode=_UpperCamelCase , file_format=self._file_format , ) return self.builder.as_dataset(split=self.split )
"""simple docstring""" import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class lowerCAmelCase__ : def __init__( self : str , snake_case__ : Optional[Any] , snake_case__ : List[Any]=1_3 , snake_case__ : str=7 , snake_case__ : Optional[int]=6 , snake_case__ : Union[str, Any]=1_7 , snake_case__ : Optional[Any]=2_3 , snake_case__ : int=1_1 , snake_case__ : Dict=True , ): '''simple docstring''' UpperCAmelCase__ : str = parent UpperCAmelCase__ : Tuple = batch_size UpperCAmelCase__ : Dict = seq_length UpperCAmelCase__ : Union[str, Any] = act_dim UpperCAmelCase__ : Dict = state_dim UpperCAmelCase__ : Optional[Any] = hidden_size UpperCAmelCase__ : List[str] = max_length UpperCAmelCase__ : int = is_training def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) UpperCAmelCase__ : List[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) UpperCAmelCase__ : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase__ : int = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 ) UpperCAmelCase__ : Optional[int] = random_attention_mask((self.batch_size, self.seq_length) ) UpperCAmelCase__ : Optional[int] = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def __a ( self : int ): '''simple docstring''' return DecisionTransformerConfig( batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , ) def __a ( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Optional[int] , ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) self.parent.assertEqual(result.state_preds.shape , states.shape ) self.parent.assertEqual(result.action_preds.shape , actions.shape ) self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase__ : 
Optional[int] = { "states": states, "actions": actions, "rewards": rewards, "returns_to_go": returns_to_go, "timesteps": timesteps, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =(DecisionTransformerModel,) if is_torch_available() else () SCREAMING_SNAKE_CASE_ =() SCREAMING_SNAKE_CASE_ ={'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids SCREAMING_SNAKE_CASE_ =False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = DecisionTransformerModelTester(self ) UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 ) def __a ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) @slow def __a ( self : List[str] ): '''simple docstring''' for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Tuple = DecisionTransformerModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Dict = model_class(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : Tuple = [*signature.parameters.keys()] UpperCAmelCase__ : str = [ "states", "actions", "rewards", "returns_to_go", "timesteps", "attention_mask", ] self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 2 # number of steps of autoregressive prediction we will perform UpperCAmelCase__ : Tuple = 1_0 # defined by the RL environment, may be normalized UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" ) UpperCAmelCase__ : Any = model.to(snake_case__ ) UpperCAmelCase__ : Optional[int] = model.config torch.manual_seed(0 ) UpperCAmelCase__ : Optional[int] = torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ) # env.reset() UpperCAmelCase__ : Optional[Any] = torch.tensor( [[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=snake_case__ ) UpperCAmelCase__ : List[str] = torch.tensor(snake_case__ , device=snake_case__ , dtype=torch.floataa ).reshape(1 , 1 , 1 ) UpperCAmelCase__ : Union[str, Any] = state UpperCAmelCase__ : Dict = torch.zeros(1 , 0 , config.act_dim , device=snake_case__ , dtype=torch.floataa ) UpperCAmelCase__ : Any = torch.zeros(1 , 0 , 
device=snake_case__ , dtype=torch.floataa ) UpperCAmelCase__ : Optional[int] = torch.tensor(0 , device=snake_case__ , dtype=torch.long ).reshape(1 , 1 ) for step in range(snake_case__ ): UpperCAmelCase__ : List[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=snake_case__ )] , dim=1 ) UpperCAmelCase__ : Optional[int] = torch.cat([rewards, torch.zeros(1 , 1 , device=snake_case__ )] , dim=1 ) UpperCAmelCase__ : Dict = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device ) with torch.no_grad(): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = model( states=snake_case__ , actions=snake_case__ , rewards=snake_case__ , returns_to_go=snake_case__ , timesteps=snake_case__ , attention_mask=snake_case__ , return_dict=snake_case__ , ) self.assertEqual(action_pred.shape , actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = ( # env.step(action) torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ), 1.0, False, {}, ) UpperCAmelCase__ : Union[str, Any] = action_pred[0, -1] UpperCAmelCase__ : int = torch.cat([states, state] , dim=1 ) UpperCAmelCase__ : Dict = returns_to_go[0, -1] - reward UpperCAmelCase__ : Optional[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 ) UpperCAmelCase__ : Tuple = torch.cat( [timesteps, torch.ones((1, 1) , device=snake_case__ , dtype=torch.long ) * (step + 1)] , dim=1 )
298
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Tuple = logging.get_logger(__name__) _lowerCAmelCase : Optional[Any] = { """tiiuae/falcon-40b""": """https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json""", """tiiuae/falcon-7b""": """https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json""", } class lowerCAmelCase__ ( __A ): SCREAMING_SNAKE_CASE_ ='falcon' SCREAMING_SNAKE_CASE_ =['past_key_values'] def __init__( self : List[str] , snake_case__ : List[Any]=6_5_0_2_4 , snake_case__ : Dict=4_5_4_4 , snake_case__ : Dict=3_2 , snake_case__ : Optional[int]=7_1 , snake_case__ : List[Any]=1e-5 , snake_case__ : Any=0.02 , snake_case__ : int=True , snake_case__ : Optional[int]=0.0 , snake_case__ : Union[str, Any]=0.0 , snake_case__ : str=None , snake_case__ : Dict=False , snake_case__ : Union[str, Any]=False , snake_case__ : str=True , snake_case__ : List[str]=True , snake_case__ : str=False , snake_case__ : List[str]=1_1 , snake_case__ : Tuple=1_1 , **snake_case__ : Tuple , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = vocab_size # Backward compatibility with n_embed kwarg UpperCAmelCase__ : str = kwargs.pop("n_embed" , __lowercase ) UpperCAmelCase__ : Union[str, Any] = hidden_size if n_embed is None else n_embed UpperCAmelCase__ : Tuple = num_hidden_layers UpperCAmelCase__ : Optional[int] = num_attention_heads UpperCAmelCase__ : Optional[Any] = layer_norm_epsilon UpperCAmelCase__ : Any = initializer_range UpperCAmelCase__ : Dict = use_cache UpperCAmelCase__ : int = hidden_dropout UpperCAmelCase__ : int = attention_dropout UpperCAmelCase__ : Any = bos_token_id UpperCAmelCase__ : List[Any] = eos_token_id UpperCAmelCase__ : Any = num_attention_heads if num_kv_heads is None else num_kv_heads UpperCAmelCase__ : int = alibi UpperCAmelCase__ : List[Any] = new_decoder_architecture UpperCAmelCase__ : List[Any] = multi_query # Ignored when new_decoder_architecture is True UpperCAmelCase__ : Union[str, Any] = parallel_attn UpperCAmelCase__ : Optional[Any] = bias super().__init__(bos_token_id=__lowercase , eos_token_id=__lowercase , **__lowercase ) @property def __a ( self : Union[str, Any] ): '''simple docstring''' return self.hidden_size // self.num_attention_heads @property def __a ( self : str ): '''simple docstring''' return not self.alibi
359
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _lowerCAmelCase : Tuple = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = ["""MLukeTokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys _lowerCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
298
0
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import torch import torch.nn as nn from ..configuration_utils import ConfigMixin, register_to_config from ..utils import BaseOutput from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps from .modeling_utils import ModelMixin from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block @dataclass class lowerCAmelCase__ ( A_ ): SCREAMING_SNAKE_CASE_ =42 class lowerCAmelCase__ ( A_ , A_ ): @register_to_config def __init__( self : str , snake_case__ : int = 6_5_5_3_6 , snake_case__ : Optional[int] = None , snake_case__ : int = 2 , snake_case__ : int = 2 , snake_case__ : int = 0 , snake_case__ : str = "fourier" , snake_case__ : bool = True , snake_case__ : bool = False , snake_case__ : float = 0.0 , snake_case__ : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , snake_case__ : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , snake_case__ : Tuple[str] = "UNetMidBlock1D" , snake_case__ : str = None , snake_case__ : Tuple[int] = (3_2, 3_2, 6_4) , snake_case__ : str = None , snake_case__ : int = 8 , snake_case__ : int = 1 , snake_case__ : bool = False , ): '''simple docstring''' super().__init__() UpperCAmelCase__ : Any = sample_size # time if time_embedding_type == "fourier": UpperCAmelCase__ : Tuple = GaussianFourierProjection( embedding_size=8 , set_W_to_weight=snake_case__ , log=snake_case__ , flip_sin_to_cos=snake_case__ ) UpperCAmelCase__ : Union[str, Any] = 2 * block_out_channels[0] elif time_embedding_type == "positional": UpperCAmelCase__ : Union[str, Any] = Timesteps( block_out_channels[0] , flip_sin_to_cos=snake_case__ , downscale_freq_shift=snake_case__ ) UpperCAmelCase__ : Optional[int] = block_out_channels[0] if use_timestep_embedding: UpperCAmelCase__ : Union[str, Any] = block_out_channels[0] * 4 UpperCAmelCase__ : Dict = TimestepEmbedding( in_channels=snake_case__ , time_embed_dim=snake_case__ , act_fn=snake_case__ , out_dim=block_out_channels[0] , ) UpperCAmelCase__ : Tuple = nn.ModuleList([] ) UpperCAmelCase__ : Tuple = None UpperCAmelCase__ : Union[str, Any] = nn.ModuleList([] ) UpperCAmelCase__ : Optional[Any] = None # down UpperCAmelCase__ : Optional[int] = in_channels for i, down_block_type in enumerate(snake_case__ ): UpperCAmelCase__ : Optional[int] = output_channel UpperCAmelCase__ : str = block_out_channels[i] if i == 0: input_channel += extra_in_channels UpperCAmelCase__ : Optional[int] = i == len(snake_case__ ) - 1 UpperCAmelCase__ : Union[str, Any] = get_down_block( snake_case__ , num_layers=snake_case__ , in_channels=snake_case__ , out_channels=snake_case__ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , ) self.down_blocks.append(snake_case__ ) # mid UpperCAmelCase__ : Tuple = get_mid_block( snake_case__ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=snake_case__ , add_downsample=snake_case__ , ) # up UpperCAmelCase__ : Optional[int] = list(reversed(snake_case__ ) ) UpperCAmelCase__ : Optional[Any] = reversed_block_out_channels[0] if out_block_type is None: UpperCAmelCase__ : str = out_channels else: UpperCAmelCase__ : Any = block_out_channels[0] for i, up_block_type in enumerate(snake_case__ ): UpperCAmelCase__ : int = output_channel UpperCAmelCase__ : Union[str, Any] = ( reversed_block_out_channels[i + 1] if i < 
len(snake_case__ ) - 1 else final_upsample_channels ) UpperCAmelCase__ : int = i == len(snake_case__ ) - 1 UpperCAmelCase__ : Dict = get_up_block( snake_case__ , num_layers=snake_case__ , in_channels=snake_case__ , out_channels=snake_case__ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , ) self.up_blocks.append(snake_case__ ) UpperCAmelCase__ : str = output_channel # out UpperCAmelCase__ : List[Any] = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 3_2 ) UpperCAmelCase__ : List[Any] = get_out_block( out_block_type=snake_case__ , num_groups_out=snake_case__ , embed_dim=block_out_channels[0] , out_channels=snake_case__ , act_fn=snake_case__ , fc_dim=block_out_channels[-1] // 4 , ) def __a ( self : Tuple , snake_case__ : torch.FloatTensor , snake_case__ : Union[torch.Tensor, float, int] , snake_case__ : bool = True , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = timestep if not torch.is_tensor(snake_case__ ): UpperCAmelCase__ : List[Any] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device ) elif torch.is_tensor(snake_case__ ) and len(timesteps.shape ) == 0: UpperCAmelCase__ : str = timesteps[None].to(sample.device ) UpperCAmelCase__ : Any = self.time_proj(snake_case__ ) if self.config.use_timestep_embedding: UpperCAmelCase__ : str = self.time_mlp(snake_case__ ) else: UpperCAmelCase__ : List[Any] = timestep_embed[..., None] UpperCAmelCase__ : int = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype ) UpperCAmelCase__ : int = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) ) # 2. down UpperCAmelCase__ : int = () for downsample_block in self.down_blocks: UpperCAmelCase__ : List[str] = downsample_block(hidden_states=snake_case__ , temb=snake_case__ ) down_block_res_samples += res_samples # 3. mid if self.mid_block: UpperCAmelCase__ : Dict = self.mid_block(snake_case__ , snake_case__ ) # 4. up for i, upsample_block in enumerate(self.up_blocks ): UpperCAmelCase__ : str = down_block_res_samples[-1:] UpperCAmelCase__ : str = down_block_res_samples[:-1] UpperCAmelCase__ : Optional[Any] = upsample_block(snake_case__ , res_hidden_states_tuple=snake_case__ , temb=snake_case__ ) # 5. post-process if self.out_block: UpperCAmelCase__ : Optional[int] = self.out_block(snake_case__ , snake_case__ ) if not return_dict: return (sample,) return UNetaDOutput(sample=snake_case__ )
360
"""simple docstring""" import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger _lowerCAmelCase : Optional[int] = get_logger(__name__) _lowerCAmelCase : Any = Path(__file__).parent / """model_card_template.md""" _lowerCAmelCase : Dict = uuida().hex _lowerCAmelCase : Optional[int] = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : Optional[int] = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : int = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/""" def SCREAMING_SNAKE_CASE__ ( snake_case : Union[Dict, str, None] = None )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'; torch/{_torch_version}' if is_flax_available(): ua += f'; jax/{_jax_version}' ua += f'; flax/{_flax_version}' if is_onnx_available(): ua += f'; onnxruntime/{_onnxruntime_version}' # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(snake_case , snake_case ): ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() ) elif isinstance(snake_case , snake_case ): ua += "; " + user_agent return ua def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> List[str]: '''simple docstring''' if token is None: UpperCAmelCase__ : Optional[Any] = HfFolder.get_token() if organization is None: UpperCAmelCase__ : Tuple = whoami(snake_case )["name"] return f'{username}/{model_id}' else: return f'{organization}/{model_id}' def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : List[Any] )-> List[Any]: '''simple docstring''' if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." 
) if hasattr(snake_case , "local_rank" ) and args.local_rank not in [-1, 0]: return UpperCAmelCase__ : int = args.hub_token if hasattr(snake_case , "hub_token" ) else None UpperCAmelCase__ : Optional[Any] = get_full_repo_name(snake_case , token=snake_case ) UpperCAmelCase__ : Tuple = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=snake_case , model_name=snake_case , repo_name=snake_case , dataset_name=args.dataset_name if hasattr(snake_case , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(snake_case , "gradient_accumulation_steps" ) else None ) , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta1" ) else None , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(snake_case , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(snake_case , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(snake_case , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(snake_case , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(snake_case , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(snake_case , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(snake_case , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , ) UpperCAmelCase__ : List[str] = os.path.join(args.output_dir , "README.md" ) model_card.save(snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] , snake_case : Optional[str] = None )-> Tuple: '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash UpperCAmelCase__ : Dict = str(Path(snake_case ).as_posix() ) UpperCAmelCase__ : Optional[int] = re.search(r"snapshots/([^/]+)/" , snake_case ) if search is None: return None UpperCAmelCase__ : Dict = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(snake_case ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. 
_lowerCAmelCase : Dict = os.path.expanduser( os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface""")) ) _lowerCAmelCase : List[Any] = os.path.join(hf_cache_home, """diffusers""") def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> None: '''simple docstring''' if new_cache_dir is None: UpperCAmelCase__ : Union[str, Any] = DIFFUSERS_CACHE if old_cache_dir is None: UpperCAmelCase__ : str = old_diffusers_cache UpperCAmelCase__ : List[str] = Path(snake_case ).expanduser() UpperCAmelCase__ : Any = Path(snake_case ).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): UpperCAmelCase__ : Dict = new_cache_dir / old_blob_path.relative_to(snake_case ) new_blob_path.parent.mkdir(parents=snake_case , exist_ok=snake_case ) os.replace(snake_case , snake_case ) try: os.symlink(snake_case , snake_case ) except OSError: logger.warning( "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). _lowerCAmelCase : Tuple = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""") if not os.path.isfile(cache_version_file): _lowerCAmelCase : Any = 0 else: with open(cache_version_file) as f: try: _lowerCAmelCase : List[str] = int(f.read()) except ValueError: _lowerCAmelCase : Optional[int] = 0 if cache_version < 1: _lowerCAmelCase : List[str] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( """The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """ """existing cached models. This is a one-time operation, you can interrupt it or run it """ """later by calling `diffusers.utils.hub_utils.move_cache()`.""" ) try: move_cache() except Exception as e: _lowerCAmelCase : Dict = """\n""".join(traceback.format_tb(e.__traceback__)) logger.error( F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """ """file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """ """message and we will do our best to help.""" ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, """w""") as f: f.write("""1""") except Exception: logger.warning( F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """ """the directory exists and can be written to.""" ) def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None )-> str: '''simple docstring''' if variant is not None: UpperCAmelCase__ : int = weights_name.split("." 
) UpperCAmelCase__ : Optional[Any] = splits[:-1] + [variant] + splits[-1:] UpperCAmelCase__ : Optional[int] = ".".join(snake_case ) return weights_name def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , *, snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : str , snake_case : List[str] , snake_case : Dict , snake_case : Any , snake_case : Any , snake_case : Tuple , snake_case : List[str] , snake_case : Any , snake_case : Optional[int]=None , )-> Tuple: '''simple docstring''' UpperCAmelCase__ : List[str] = str(snake_case ) if os.path.isfile(snake_case ): return pretrained_model_name_or_path elif os.path.isdir(snake_case ): if os.path.isfile(os.path.join(snake_case , snake_case ) ): # Load from a PyTorch checkpoint UpperCAmelCase__ : Any = os.path.join(snake_case , snake_case ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(snake_case , snake_case , snake_case ) ): UpperCAmelCase__ : str = os.path.join(snake_case , snake_case , snake_case ) return model_file else: raise EnvironmentError( f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(snake_case ).base_version ) >= version.parse("0.20.0" ) ): try: UpperCAmelCase__ : List[Any] = hf_hub_download( snake_case , filename=_add_variant(snake_case , snake_case ) , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) warnings.warn( f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , snake_case , ) return model_file except: # noqa: E722 warnings.warn( f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(snake_case , snake_case )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(snake_case , snake_case )}\' so that the correct variant file can be added.' , snake_case , ) try: # 2. 
Load model file as usual UpperCAmelCase__ : Dict = hf_hub_download( snake_case , filename=snake_case , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ' "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ' "this model name. Check the model page at " f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' ) except EntryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' ) except HTTPError as err: raise EnvironmentError( f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' ) except ValueError: raise EnvironmentError( f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it' f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a' f' directory containing a file named {weights_name} or' " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ' "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ' f'containing a file named {weights_name}' )
298
0
"""simple docstring""" import numpy as np import torch from torch.utils.data import DataLoader from accelerate.utils.dataclasses import DistributedType class lowerCAmelCase__ : def __init__( self : Optional[int] , snake_case__ : Any=2 , snake_case__ : List[Any]=3 , snake_case__ : Dict=6_4 , snake_case__ : int=None ): '''simple docstring''' UpperCAmelCase__ : Tuple = np.random.default_rng(_a ) UpperCAmelCase__ : Any = length UpperCAmelCase__ : Any = rng.normal(size=(length,) ).astype(np.floataa ) UpperCAmelCase__ : Union[str, Any] = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa ) def __len__( self : Optional[int] ): '''simple docstring''' return self.length def __getitem__( self : Union[str, Any] , snake_case__ : Tuple ): '''simple docstring''' return {"x": self.x[i], "y": self.y[i]} class lowerCAmelCase__ ( torch.nn.Module ): def __init__( self : str , snake_case__ : int=0 , snake_case__ : Tuple=0 , snake_case__ : List[Any]=False ): '''simple docstring''' super().__init__() UpperCAmelCase__ : int = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) UpperCAmelCase__ : Any = torch.nn.Parameter(torch.tensor([2, 3] ).float() ) UpperCAmelCase__ : int = True def __a ( self : Tuple , snake_case__ : List[str]=None ): '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) UpperCAmelCase__ : Tuple = False return x * self.a[0] + self.b[0] class lowerCAmelCase__ ( torch.nn.Module ): def __init__( self : Union[str, Any] , snake_case__ : List[Any]=0 , snake_case__ : int=0 , snake_case__ : Any=False ): '''simple docstring''' super().__init__() UpperCAmelCase__ : Optional[Any] = torch.nn.Parameter(torch.tensor(_a ).float() ) UpperCAmelCase__ : Optional[Any] = torch.nn.Parameter(torch.tensor(_a ).float() ) UpperCAmelCase__ : Dict = True def __a ( self : Optional[int] , snake_case__ : str=None ): '''simple docstring''' if self.first_batch: print(f'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' ) UpperCAmelCase__ : Optional[Any] = False return x * self.a + self.b def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : int = 16 )-> List[str]: '''simple docstring''' from datasets import load_dataset from transformers import AutoTokenizer UpperCAmelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained("bert-base-cased" ) UpperCAmelCase__ : Optional[Any] = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"} UpperCAmelCase__ : List[Any] = load_dataset("csv" , data_files=lowerCAmelCase__ ) UpperCAmelCase__ : Union[str, Any] = datasets["train"].unique("label" ) UpperCAmelCase__ : List[Any] = {v: i for i, v in enumerate(lowerCAmelCase__ )} def tokenize_function(snake_case : int ): # max_length=None => use the model max length (it's actually the default) UpperCAmelCase__ : Optional[int] = tokenizer( examples["sentence1"] , examples["sentence2"] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , padding="max_length" ) if "label" in examples: UpperCAmelCase__ : Tuple = [label_to_id[l] for l in examples["label"]] return outputs # Apply the method we just defined to all the examples in all the splits of the dataset UpperCAmelCase__ : List[Any] = datasets.map( lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=["sentence1", "sentence2", "label"] , ) def collate_fn(snake_case : List[Any] ): # On TPU it's best to pad everything to the same length or training will be very slow. 
if accelerator.distributed_type == DistributedType.TPU: return tokenizer.pad(lowerCAmelCase__ , padding="max_length" , max_length=128 , return_tensors="pt" ) return tokenizer.pad(lowerCAmelCase__ , padding="longest" , return_tensors="pt" ) # Instantiate dataloaders. UpperCAmelCase__ : List[str] = DataLoader(tokenized_datasets["train"] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=2 ) UpperCAmelCase__ : List[str] = DataLoader(tokenized_datasets["validation"] , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=1 ) return train_dataloader, eval_dataloader
361
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" ) UpperCAmelCase__ : int = AutoTokenizer.from_pretrained("google/mt5-small" ) UpperCAmelCase__ : Dict = tokenizer("Hello there" , return_tensors="tf" ).input_ids UpperCAmelCase__ : Union[str, Any] = tokenizer("Hi I am" , return_tensors="tf" ).input_ids UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ).loss UpperCAmelCase__ : Optional[Any] = -tf.math.reduce_mean(snake_case__ ).numpy() UpperCAmelCase__ : List[Any] = -21.22_8168 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
298
0
"""simple docstring""" from collections.abc import Callable import numpy as np def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Dict , snake_case : List[Any] , snake_case : Optional[int] )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[Any] = int(np.ceil((x_end - xa) / step_size ) ) UpperCAmelCase__ : List[Any] = np.zeros((n + 1,) ) UpperCAmelCase__ : List[str] = ya UpperCAmelCase__ : Dict = xa for k in range(lowercase__ ): UpperCAmelCase__ : Optional[Any] = y[k] + step_size * ode_func(lowercase__ , y[k] ) UpperCAmelCase__ : Optional[int] = y[k] + ( (step_size / 2) * (ode_func(lowercase__ , y[k] ) + ode_func(x + step_size , lowercase__ )) ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
362
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : def __init__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict=1_3 , snake_case__ : List[str]=7 , snake_case__ : Union[str, Any]=True , snake_case__ : Tuple=True , snake_case__ : Optional[int]=True , snake_case__ : Any=True , snake_case__ : Any=9_9 , snake_case__ : List[Any]=1_6 , snake_case__ : Any=3_6 , snake_case__ : Union[str, Any]=6 , snake_case__ : Tuple=6 , snake_case__ : List[str]=6 , snake_case__ : List[str]=3_7 , snake_case__ : Dict="gelu" , snake_case__ : int=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : List[str]=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : List[str]=3 , snake_case__ : Any=4 , snake_case__ : int=None , ): '''simple docstring''' UpperCAmelCase__ : Tuple = parent UpperCAmelCase__ : int = batch_size UpperCAmelCase__ : int = seq_length UpperCAmelCase__ : List[str] = is_training UpperCAmelCase__ : Union[str, Any] = use_input_mask UpperCAmelCase__ : Optional[Any] = use_token_type_ids UpperCAmelCase__ : Any = use_labels UpperCAmelCase__ : List[Any] = vocab_size UpperCAmelCase__ : Any = embedding_size UpperCAmelCase__ : List[str] = hidden_size UpperCAmelCase__ : List[Any] = num_hidden_layers UpperCAmelCase__ : int = num_hidden_groups UpperCAmelCase__ : Union[str, Any] = num_attention_heads UpperCAmelCase__ : List[str] = intermediate_size UpperCAmelCase__ : Optional[Any] = hidden_act UpperCAmelCase__ : List[Any] = hidden_dropout_prob UpperCAmelCase__ : Tuple = attention_probs_dropout_prob UpperCAmelCase__ : str = max_position_embeddings UpperCAmelCase__ : Any = type_vocab_size UpperCAmelCase__ : Union[str, Any] = type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = initializer_range UpperCAmelCase__ : Tuple = num_labels UpperCAmelCase__ : List[str] = num_choices UpperCAmelCase__ : Union[str, Any] = scope def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ : Optional[int] = None if self.use_input_mask: UpperCAmelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : Optional[int] = None if self.use_token_type_ids: UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : Any = None if self.use_labels: UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ : int = 
self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self : Any ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def __a ( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = AlbertModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ ) UpperCAmelCase__ : Optional[Any] = model(snake_case__ , token_type_ids=snake_case__ ) UpperCAmelCase__ : Optional[int] = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertForPreTraining(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , sentence_order_label=snake_case__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def __a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AlbertForMaskedLM(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertForQuestionAnswering(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[str] = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self 
: Dict , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.num_labels UpperCAmelCase__ : int = AlbertForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self : str , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : str = self.num_labels UpperCAmelCase__ : Any = AlbertForTokenClassification(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[str] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self : Any , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.num_choices UpperCAmelCase__ : Optional[Any] = AlbertForMultipleChoice(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Tuple = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[Any] = config_and_inputs UpperCAmelCase__ : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( { '''feature-extraction''': AlbertModel, '''fill-mask''': AlbertForMaskedLM, '''question-answering''': AlbertForQuestionAnswering, '''text-classification''': AlbertForSequenceClassification, '''token-classification''': AlbertForTokenClassification, '''zero-shot''': AlbertForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =True def __a ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[int]=False ): '''simple docstring''' UpperCAmelCase__ : List[str] = super()._prepare_for_class(snake_case__ , 
snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class in get_values(snake_case__ ): UpperCAmelCase__ : List[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case__ ) UpperCAmelCase__ : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = AlbertModelTester(self ) UpperCAmelCase__ : Any = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 ) def __a ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case__ ) def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase__ : Dict = type self.model_tester.create_and_check_model(*snake_case__ ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained("albert-base-v2" ) UpperCAmelCase__ : Dict = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) UpperCAmelCase__ : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ )[0] UpperCAmelCase__ : Dict = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , snake_case__ ) UpperCAmelCase__ : Union[str, Any] = torch.tensor( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1e-4 ) )
298
0
"""simple docstring""" import os import sys import transformers _lowerCAmelCase : str = """3""" print("""Python version:""", sys.version) print("""transformers version:""", transformers.__version__) try: import torch print("""Torch version:""", torch.__version__) print("""Cuda available:""", torch.cuda.is_available()) print("""Cuda version:""", torch.version.cuda) print("""CuDNN version:""", torch.backends.cudnn.version()) print("""Number of GPUs available:""", torch.cuda.device_count()) print("""NCCL version:""", torch.cuda.nccl.version()) except ImportError: print("""Torch version:""", None) try: import deepspeed print("""DeepSpeed version:""", deepspeed.__version__) except ImportError: print("""DeepSpeed version:""", None) try: import tensorflow as tf print("""TensorFlow version:""", tf.__version__) print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU"""))) print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU"""))) except ImportError: print("""TensorFlow version:""", None)
363
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Any )-> Any: '''simple docstring''' UpperCAmelCase__ : List[str] = [1] for i in range(2 , snake_case ): factorials.append(factorials[-1] * i ) assert 0 <= k < factorials[-1] * n, "k out of bounds" UpperCAmelCase__ : Union[str, Any] = [] UpperCAmelCase__ : str = list(range(snake_case ) ) # Find permutation while factorials: UpperCAmelCase__ : str = factorials.pop() UpperCAmelCase__ , UpperCAmelCase__ : int = divmod(snake_case , snake_case ) permutation.append(elements[number] ) elements.remove(elements[number] ) permutation.append(elements[0] ) return permutation if __name__ == "__main__": import doctest doctest.testmod()
298
0
"""simple docstring""" import warnings from ...utils import logging from .image_processing_beit import BeitImageProcessor _lowerCAmelCase : Dict = logging.get_logger(__name__) class lowerCAmelCase__ ( a_ ): def __init__( self : str , *snake_case__ : Optional[int] , **snake_case__ : Any ): '''simple docstring''' warnings.warn( "The class BeitFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please" " use BeitImageProcessor instead." , lowercase_ , ) super().__init__(*lowercase_ , **lowercase_ )
364
"""simple docstring""" import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: _lowerCAmelCase : Union[str, Any] = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class lowerCAmelCase__ ( unittest.TestCase ): def __init__( self : Dict , snake_case__ : Optional[int] , snake_case__ : List[str]=7 , snake_case__ : int=3 , snake_case__ : Any=1_8 , snake_case__ : List[Any]=3_0 , snake_case__ : int=4_0_0 , snake_case__ : Dict=None , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=None , ): '''simple docstring''' UpperCAmelCase__ : Dict = size if size is not None else {"height": 2_0, "width": 2_0} UpperCAmelCase__ : List[str] = parent UpperCAmelCase__ : List[str] = batch_size UpperCAmelCase__ : Optional[Any] = num_channels UpperCAmelCase__ : Any = image_size UpperCAmelCase__ : int = min_resolution UpperCAmelCase__ : Tuple = max_resolution UpperCAmelCase__ : Optional[int] = size UpperCAmelCase__ : Optional[int] = do_normalize UpperCAmelCase__ : str = do_convert_rgb UpperCAmelCase__ : Dict = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6] UpperCAmelCase__ : Union[str, Any] = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6} def __a ( self : str ): '''simple docstring''' return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg" UpperCAmelCase__ : List[str] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("RGB" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : int = PixaStructImageProcessingTester(self ) @property def __a ( self : Optional[int] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , "do_normalize" ) ) self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.image_processor_tester.prepare_dummy_image() UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict ) UpperCAmelCase__ : Dict = 2_0_4_8 UpperCAmelCase__ : int = image_processor(snake_case__ , return_tensors="pt" , max_patches=snake_case__ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) ) def __a ( self : List[Any] ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : 
Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : List[Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : str = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : List[Any] ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 UpperCAmelCase__ : Optional[int] = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(snake_case__ ): UpperCAmelCase__ : List[Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches UpperCAmelCase__ : Optional[Any] = "Hello" UpperCAmelCase__ : int = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : Dict = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : Dict ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , np.ndarray ) UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : Dict = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : List[str] = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : Optional[int] ): 
'''simple docstring''' # Initialize image_processor UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , torch.Tensor ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : int = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : str = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = PixaStructImageProcessingTester(self , num_channels=4 ) UpperCAmelCase__ : Optional[int] = 3 @property def __a ( self : int ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , "do_normalize" ) ) self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) ) def __a ( self : int ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : str = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : Optional[int] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : Dict = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
298
0
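The Pix2Struct tests above assert one shape contract: each image is flattened into `max_patches` rows of `patch_h * patch_w * num_channels + 2` values, where the extra 2 slots carry the patch's row/column index. A minimal sketch of that contract, assuming a recent `transformers` with `Pix2StructImageProcessor` and a torch backend; image size and patch count here are illustrative, not from the tests:

import numpy as np
from transformers import Pix2StructImageProcessor

processor = Pix2StructImageProcessor(patch_size={"height": 16, "width": 16})
image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)  # dummy RGB image

encoding = processor(image, return_tensors="pt", max_patches=1024)
# 16 * 16 * 3 pixel values per patch, plus 2 slots for the patch's row/column index
expected_hidden_dim = 16 * 16 * 3 + 2
assert encoding.flattened_patches.shape == (1, 1024, expected_hidden_dim)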
"""simple docstring""" from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def SCREAMING_SNAKE_CASE__ ( snake_case : Any , snake_case : Optional[int] , snake_case : str=1E-1_2 )-> Dict: '''simple docstring''' UpperCAmelCase__ : Any = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__a , axis=1 ) , a_min=__a ) ).T UpperCAmelCase__ : Optional[Any] = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__a , axis=1 ) , a_min=__a ) ).T return jnp.matmul(__a , norm_emb_a.T ) class lowerCAmelCase__ ( nn.Module ): SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =jnp.floataa def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[Any] = FlaxCLIPVisionModule(self.config.vision_config ) UpperCAmelCase__ : Tuple = nn.Dense(self.config.projection_dim , use_bias=_a , dtype=self.dtype ) UpperCAmelCase__ : List[str] = self.param("concept_embeds" , jax.nn.initializers.ones , (1_7, self.config.projection_dim) ) UpperCAmelCase__ : Optional[int] = self.param( "special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) ) UpperCAmelCase__ : Dict = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (1_7,) ) UpperCAmelCase__ : int = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) ) def __call__( self : Optional[int] , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : str = self.vision_model(_a )[1] UpperCAmelCase__ : Optional[Any] = self.visual_projection(_a ) UpperCAmelCase__ : Tuple = jax_cosine_distance(_a , self.special_care_embeds ) UpperCAmelCase__ : Optional[int] = jax_cosine_distance(_a , self.concept_embeds ) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs UpperCAmelCase__ : Union[str, Any] = 0.0 UpperCAmelCase__ : Optional[int] = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment UpperCAmelCase__ : Dict = jnp.round(_a , 3 ) UpperCAmelCase__ : Optional[int] = jnp.any(special_scores > 0 , axis=1 , keepdims=_a ) # Use a lower threshold if an image has any special care concept UpperCAmelCase__ : Union[str, Any] = is_special_care * 0.01 UpperCAmelCase__ : Any = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment UpperCAmelCase__ : Optional[int] = jnp.round(_a , 3 ) UpperCAmelCase__ : int = jnp.any(concept_scores > 0 , axis=1 ) return has_nsfw_concepts class lowerCAmelCase__ ( __lowercase ): SCREAMING_SNAKE_CASE_ =CLIPConfig SCREAMING_SNAKE_CASE_ ="clip_input" SCREAMING_SNAKE_CASE_ =FlaxStableDiffusionSafetyCheckerModule def __init__( self : List[str] , snake_case__ : Optional[Any] , snake_case__ : Tuple = None , snake_case__ : List[Any] = 0 , snake_case__ : Dict = jnp.floataa , snake_case__ : Tuple = True , **snake_case__ : Any , ): '''simple docstring''' if input_shape is None: UpperCAmelCase__ : Optional[Any] = (1, 2_2_4, 2_2_4, 3) UpperCAmelCase__ : int = self.module_class(config=_a , dtype=_a , **_a ) super().__init__(_a , _a , input_shape=_a , seed=_a , dtype=_a , _do_init=_do_init ) def __a ( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : Any = None ): '''simple docstring''' # init input tensor UpperCAmelCase__ : Optional[int] = jax.random.normal(_a , _a ) UpperCAmelCase__ : str = jax.random.split(_a ) UpperCAmelCase__ : 
Tuple = {'''params''': params_rng, '''dropout''': dropout_rng} UpperCAmelCase__ : Tuple = self.module.init(_a , _a )['''params'''] return random_params def __call__( self : List[str] , snake_case__ : List[str] , snake_case__ : Tuple = None , ): '''simple docstring''' UpperCAmelCase__ : List[str] = jnp.transpose(_a , (0, 2, 3, 1) ) return self.module.apply( {"params": params or self.params} , jnp.array(_a , dtype=jnp.floataa ) , rngs={} , )
365
"""simple docstring""" import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> Any: '''simple docstring''' assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE__ ( )-> List[Any]: '''simple docstring''' assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE__ ( )-> Optional[int]: '''simple docstring''' UpperCAmelCase__ : int = "mock-s3-bucket" UpperCAmelCase__ : Any = f's3://{mock_bucket}' UpperCAmelCase__ : Tuple = extract_path_from_uri(snake_case ) assert dataset_path.startswith("s3://" ) is False UpperCAmelCase__ : str = "./local/path" UpperCAmelCase__ : Union[str, Any] = extract_path_from_uri(snake_case ) assert dataset_path == new_dataset_path def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case ) assert is_remote is True UpperCAmelCase__ : str = fsspec.filesystem("file" ) UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case ) assert is_remote is False @pytest.mark.parametrize("compression_fs_class" , snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Any , snake_case : List[str] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : int )-> int: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file} UpperCAmelCase__ : Dict = input_paths[compression_fs_class.protocol] if input_path is None: UpperCAmelCase__ : Optional[Any] = f'for \'{compression_fs_class.protocol}\' compression protocol, ' if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case ) UpperCAmelCase__ : Optional[Any] = fsspec.filesystem(compression_fs_class.protocol , fo=snake_case ) assert isinstance(snake_case , snake_case ) UpperCAmelCase__ : Union[str, Any] = os.path.basename(snake_case ) UpperCAmelCase__ : Optional[int] = expected_filename[: expected_filename.rindex("." 
)] assert fs.glob("*" ) == [expected_filename] with fs.open(snake_case , "r" , encoding="utf-8" ) as f, open(snake_case , encoding="utf-8" ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("protocol" , ["zip", "gzip"] ) def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : Dict , snake_case : Tuple )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[str] = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path} UpperCAmelCase__ : int = compressed_file_paths[protocol] UpperCAmelCase__ : Any = "dataset.jsonl" UpperCAmelCase__ : Any = f'{protocol}://{member_file_path}::{compressed_file_path}' UpperCAmelCase__ , *UpperCAmelCase__ : Optional[int] = fsspec.get_fs_token_paths(snake_case ) assert fs.isfile(snake_case ) assert not fs.isfile("non_existing_" + member_file_path ) @pytest.mark.integration def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Dict , snake_case : Dict , snake_case : Dict )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = hf_api.dataset_info(snake_case , token=snake_case ) UpperCAmelCase__ : str = HfFileSystem(repo_info=snake_case , token=snake_case ) assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"] assert hffs.isdir("data" ) assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" ) with open(snake_case ) as f: assert hffs.open("data/text_data.txt" , "r" ).read() == f.read() def SCREAMING_SNAKE_CASE__ ( )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ : Tuple = "bz2" # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(snake_case , snake_case , clobber=snake_case ) with pytest.warns(snake_case ) as warning_info: importlib.reload(datasets.filesystems ) assert len(snake_case ) == 1 assert ( str(warning_info[0].message ) == f'A filesystem protocol was already set for {protocol} and will be overwritten.' )
298
0
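The Flax safety-checker module above scores images by comparing L2-normalized CLIP embeddings against fixed concept embeddings; the distance helper is just a normalized dot product. A small numpy sketch of that operation (function and variable names are mine, not from the original):

import numpy as np

def cosine_distance(emb_1: np.ndarray, emb_2: np.ndarray, eps: float = 1e-12) -> np.ndarray:
    # L2-normalize each row, clipping tiny norms to avoid division by zero,
    # then take pairwise dot products -> a cosine-similarity matrix
    norm_1 = emb_1 / np.clip(np.linalg.norm(emb_1, axis=1, keepdims=True), eps, None)
    norm_2 = emb_2 / np.clip(np.linalg.norm(emb_2, axis=1, keepdims=True), eps, None)
    return norm_1 @ norm_2.T

image_embeds = np.random.randn(2, 768)     # e.g. projected CLIP embeddings for 2 images
concept_embeds = np.random.randn(17, 768)  # 17 fixed concept embeddings, as in the module above
print(cosine_distance(image_embeds, concept_embeds).shape)  # (2, 17)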
"""simple docstring""" from __future__ import annotations def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : list[str] | None = None )-> Union[str, Any]: UpperCAmelCase__ : Any = word_bank or [] # create a table UpperCAmelCase__ : int = len(lowerCAmelCase__ ) + 1 UpperCAmelCase__ : list[list[list[str]]] = [] for _ in range(lowerCAmelCase__ ): table.append([] ) # seed value UpperCAmelCase__ : Optional[int] = [[]] # because empty string has empty combination # iterate through the indices for i in range(lowerCAmelCase__ ): # condition if table[i] != []: for word in word_bank: # slice condition if target[i : i + len(lowerCAmelCase__ )] == word: UpperCAmelCase__ : list[list[str]] = [ [word, *way] for way in table[i] ] # adds the word to every combination the current position holds # now,push that combination to the table[i+len(word)] table[i + len(lowerCAmelCase__ )] += new_combinations # combinations are in reverse order so reverse for better output for combination in table[len(lowerCAmelCase__ )]: combination.reverse() return table[len(lowerCAmelCase__ )] if __name__ == "__main__": print(all_construct("""jwajalapa""", ["""jwa""", """j""", """w""", """a""", """la""", """lapa"""])) print(all_construct("""rajamati""", ["""s""", """raj""", """amat""", """raja""", """ma""", """i""", """t"""])) print( all_construct( """hexagonosaurus""", ["""h""", """ex""", """hex""", """ag""", """ago""", """ru""", """auru""", """rus""", """go""", """no""", """o""", """s"""], ) )
366
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : List[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[Any] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''megatron-bert''' def __init__( self : Optional[Any] , snake_case__ : Dict=2_9_0_5_6 , snake_case__ : Optional[int]=1_0_2_4 , snake_case__ : int=2_4 , snake_case__ : str=1_6 , snake_case__ : Optional[Any]=4_0_9_6 , snake_case__ : List[str]="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : Tuple=5_1_2 , snake_case__ : str=2 , snake_case__ : List[Any]=0.02 , snake_case__ : Any=1e-12 , snake_case__ : Any=0 , snake_case__ : str="absolute" , snake_case__ : Optional[Any]=True , **snake_case__ : int , ): '''simple docstring''' super().__init__(pad_token_id=snake_case__ , **snake_case__ ) UpperCAmelCase__ : str = vocab_size UpperCAmelCase__ : str = hidden_size UpperCAmelCase__ : List[str] = num_hidden_layers UpperCAmelCase__ : Optional[int] = num_attention_heads UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Tuple = hidden_dropout_prob UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob UpperCAmelCase__ : Any = max_position_embeddings UpperCAmelCase__ : Dict = type_vocab_size UpperCAmelCase__ : Optional[int] = initializer_range UpperCAmelCase__ : int = layer_norm_eps UpperCAmelCase__ : Optional[Any] = position_embedding_type UpperCAmelCase__ : Any = use_cache
298
0
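Like any `PretrainedConfig` subclass, the config above can be instantiated directly and any field overridden by keyword. A short usage sketch, assuming the matching model class is available in the same `transformers` install:

from transformers import MegatronBertConfig, MegatronBertModel

# default Megatron-BERT-345M-style shape; override fields by keyword as needed
config = MegatronBertConfig(num_hidden_layers=12, hidden_size=768, num_attention_heads=12)
model = MegatronBertModel(config)  # randomly initialized, not pretrained
print(config.vocab_size, model.config.hidden_size)  # 29056 768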
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available _lowerCAmelCase : Union[str, Any] = { """configuration_graphormer""": ["""GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GraphormerConfig"""], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Optional[int] = [ """GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST""", """GraphormerForGraphClassification""", """GraphormerModel""", """GraphormerPreTrainedModel""", ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys _lowerCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
367
"""simple docstring""" import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class lowerCAmelCase__ ( datasets.BeamBasedBuilder ): def __a ( self : Dict ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=snake_case__ , ) def __a ( self : int , snake_case__ : str , snake_case__ : List[str] ): '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )] def __a ( self : Any , snake_case__ : str , snake_case__ : str ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(snake_case__ ) class lowerCAmelCase__ ( datasets.BeamBasedBuilder ): def __a ( self : Any ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=snake_case__ , ) def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : int ): '''simple docstring''' return [ datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} ) ] def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Any ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Dict: '''simple docstring''' return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )] def SCREAMING_SNAKE_CASE__ ( )-> List[Any]: '''simple docstring''' return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )] class lowerCAmelCase__ ( __magic_name__ ): @require_beam def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : List[Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) ) UpperCAmelCase__ : Tuple = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] ) self.assertDictEqual( dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset @require_beam def __a ( self : Dict ): '''simple docstring''' import apache_beam as beam UpperCAmelCase__ : Dict = beam.io.parquetio.WriteToParquet UpperCAmelCase__ : List[str] = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Union[str, Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock: UpperCAmelCase__ : List[Any] = partial(snake_case__ , num_shards=2 ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( snake_case__ , builder.name , 
"default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) ) self.assertTrue( os.path.exists( os.path.join( snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) ) UpperCAmelCase__ : Dict = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset @require_beam def __a ( self : str ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Optional[Any] = DummyBeamDataset(cache_dir=snake_case__ ) self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare ) @require_beam def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = len(get_test_nested_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : List[Any] = NestedBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) ) self.assertDictEqual( builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) ) UpperCAmelCase__ : Tuple = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] ) self.assertDictEqual( dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset
298
0
"""simple docstring""" from __future__ import annotations def SCREAMING_SNAKE_CASE__ ( snake_case : int | str )-> bool: '''simple docstring''' UpperCAmelCase__ : List[Any] = str(_UpperCamelCase ) return n == n[::-1] def SCREAMING_SNAKE_CASE__ ( snake_case : int = 100_0000 )-> Any: '''simple docstring''' UpperCAmelCase__ : List[Any] = 0 for i in range(1 , _UpperCamelCase ): if is_palindrome(_UpperCamelCase ) and is_palindrome(bin(_UpperCamelCase ).split("b" )[1] ): total += i return total if __name__ == "__main__": print(solution(int(str(input().strip()))))
368
"""simple docstring""" import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =XLMTokenizer SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase__ : Optional[int] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] UpperCAmelCase__ : Any = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase__ : Tuple = ["l o 123", "lo w 1456", "e r</w> 1789", ""] UpperCAmelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" ) as fp: fp.write(json.dumps(snake_case__ ) ) with open(self.merges_file , "w" ) as fp: fp.write("\n".join(snake_case__ ) ) def __a ( self : Union[str, Any] , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = "lower newer" UpperCAmelCase__ : Optional[Any] = "lower newer" return input_text, output_text def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = XLMTokenizer(self.vocab_file , self.merges_file ) UpperCAmelCase__ : List[Any] = "lower" UpperCAmelCase__ : Any = ["low", "er</w>"] UpperCAmelCase__ : Any = tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) UpperCAmelCase__ : Optional[Any] = tokens + ["<unk>"] UpperCAmelCase__ : List[Any] = [1_4, 1_5, 2_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" ) UpperCAmelCase__ : str = tokenizer.encode("sequence builders" , add_special_tokens=snake_case__ ) UpperCAmelCase__ : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case__ ) UpperCAmelCase__ : Any = tokenizer.build_inputs_with_special_tokens(snake_case__ ) UpperCAmelCase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
298
0
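A quick worked check of the double-base palindrome test above: 585 is a decimal palindrome whose binary form 1001001001 is also a palindrome, so it is counted; 21 (10101 in binary) is a binary palindrome but not a decimal one, so it is skipped.

# 585 = 0b1001001001: palindromic in both bases, so solution() counts it
assert str(585) == str(585)[::-1]
assert bin(585)[2:] == bin(585)[2:][::-1]

# 21 = 0b10101: binary palindrome only, so solution() skips it
assert bin(21)[2:] == bin(21)[2:][::-1]
assert str(21) != str(21)[::-1]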
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ....tokenization_utils_fast import PreTrainedTokenizerFast from ....utils import logging from .tokenization_retribert import RetriBertTokenizer _lowerCAmelCase : Any = logging.get_logger(__name__) _lowerCAmelCase : Tuple = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} _lowerCAmelCase : Optional[int] = { '''vocab_file''': { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''yjernite/retribert-base-uncased''': ( '''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json''' ), }, } _lowerCAmelCase : Any = { '''yjernite/retribert-base-uncased''': 512, } _lowerCAmelCase : int = { '''yjernite/retribert-base-uncased''': {'''do_lower_case''': True}, } class lowerCAmelCase__ ( a__ ): SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ =PRETRAINED_INIT_CONFIGURATION SCREAMING_SNAKE_CASE_ =RetriBertTokenizer SCREAMING_SNAKE_CASE_ =['input_ids', 'attention_mask'] def __init__( self : Union[str, Any] , snake_case__ : Optional[Any]=None , snake_case__ : Any=None , snake_case__ : Dict=True , snake_case__ : Tuple="[UNK]" , snake_case__ : Union[str, Any]="[SEP]" , snake_case__ : str="[PAD]" , snake_case__ : Tuple="[CLS]" , snake_case__ : Any="[MASK]" , snake_case__ : str=True , snake_case__ : Union[str, Any]=None , **snake_case__ : List[Any] , ): '''simple docstring''' super().__init__( _lowerCamelCase , tokenizer_file=_lowerCamelCase , do_lower_case=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , pad_token=_lowerCamelCase , cls_token=_lowerCamelCase , mask_token=_lowerCamelCase , tokenize_chinese_chars=_lowerCamelCase , strip_accents=_lowerCamelCase , **_lowerCamelCase , ) UpperCAmelCase__ : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() ) if ( normalizer_state.get("lowercase" , _lowerCamelCase ) != do_lower_case or normalizer_state.get("strip_accents" , _lowerCamelCase ) != strip_accents or normalizer_state.get("handle_chinese_chars" , _lowerCamelCase ) != tokenize_chinese_chars ): UpperCAmelCase__ : Union[str, Any] = getattr(_lowerCamelCase , normalizer_state.pop("type" ) ) UpperCAmelCase__ : Union[str, Any] = do_lower_case UpperCAmelCase__ : List[Any] = strip_accents UpperCAmelCase__ : List[Any] = tokenize_chinese_chars UpperCAmelCase__ : Optional[int] = normalizer_class(**_lowerCamelCase ) UpperCAmelCase__ : Tuple = do_lower_case def __a ( self : Any , snake_case__ : List[Any] , snake_case__ : Dict=None ): '''simple docstring''' UpperCAmelCase__ : int = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def __a ( self : Optional[Any] , snake_case__ : List[Any] , snake_case__ : str = None ): '''simple docstring''' UpperCAmelCase__ : List[str] = [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] def __a ( self : Dict , snake_case__ : str , snake_case__ : Union[str, Any] = None ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self._tokenizer.model.save(_lowerCamelCase , name=_lowerCamelCase ) return tuple(_lowerCamelCase )
369
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class lowerCAmelCase__ : def __init__( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : str=sys.maxsize ): '''simple docstring''' UpperCAmelCase__ : Any = "bilinear" UpperCAmelCase__ : Any = max_size UpperCAmelCase__ : Any = short_edge_length def __call__( self : Dict , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = [] for img in imgs: UpperCAmelCase__ , UpperCAmelCase__ : int = img.shape[:2] # later: provide list and randomly choose index for resize UpperCAmelCase__ : Dict = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img UpperCAmelCase__ : Dict = size * 1.0 / min(snake_case__ , snake_case__ ) if h < w: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = size, scale * w else: UpperCAmelCase__ , UpperCAmelCase__ : int = scale * h, size if max(snake_case__ , snake_case__ ) > self.max_size: UpperCAmelCase__ : Union[str, Any] = self.max_size * 1.0 / max(snake_case__ , snake_case__ ) UpperCAmelCase__ : List[str] = newh * scale UpperCAmelCase__ : int = neww * scale UpperCAmelCase__ : List[Any] = int(neww + 0.5 ) UpperCAmelCase__ : Optional[Any] = int(newh + 0.5 ) if img.dtype == np.uinta: UpperCAmelCase__ : Any = Image.fromarray(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) UpperCAmelCase__ : Optional[int] = np.asarray(snake_case__ ) else: UpperCAmelCase__ : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw UpperCAmelCase__ : Tuple = nn.functional.interpolate( snake_case__ , (newh, neww) , mode=self.interp_method , align_corners=snake_case__ ).squeeze(0 ) img_augs.append(snake_case__ ) return img_augs class lowerCAmelCase__ : def __init__( self : Optional[int] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) UpperCAmelCase__ : Any = cfg.INPUT.FORMAT UpperCAmelCase__ : Optional[Any] = cfg.SIZE_DIVISIBILITY UpperCAmelCase__ : str = cfg.PAD_VALUE UpperCAmelCase__ : List[Any] = cfg.INPUT.MAX_SIZE_TEST UpperCAmelCase__ : Dict = cfg.MODEL.DEVICE UpperCAmelCase__ : Optional[int] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase__ : str = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase__ : List[str] = lambda snake_case__ : (x - self.pixel_mean) / self.pixel_std def __a ( self : Optional[int] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = tuple(max(snake_case__ ) for s in zip(*[img.shape for img in images] ) ) UpperCAmelCase__ : Tuple = [im.shape[-2:] for im in images] UpperCAmelCase__ : int = [ nn.functional.pad( snake_case__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(snake_case__ , snake_case__ ) ] return torch.stack(snake_case__ ), torch.tensor(snake_case__ ) def __call__( self : str , snake_case__ : int , snake_case__ : int=False ): '''simple docstring''' with torch.no_grad(): if not isinstance(snake_case__ , snake_case__ ): UpperCAmelCase__ : Dict = [images] if single_image: assert len(snake_case__ ) == 1 for i in range(len(snake_case__ ) ): if 
isinstance(images[i] , torch.Tensor ): images.insert(snake_case__ , images.pop(snake_case__ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( snake_case__ , torch.as_tensor(img_tensorize(images.pop(snake_case__ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge UpperCAmelCase__ : Optional[Any] = torch.tensor([im.shape[:2] for im in images] ) UpperCAmelCase__ : Tuple = self.aug(snake_case__ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic UpperCAmelCase__ : Optional[int] = [self.normalizer(snake_case__ ) for x in images] # now pad them to do the following operations UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.pad(snake_case__ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad UpperCAmelCase__ : Tuple = torch.true_divide(snake_case__ , snake_case__ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : str )-> List[Any]: '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple[int, int] )-> int: '''simple docstring''' assert torch.isfinite(snake_case ).all(), "Box tensor contains infinite or NaN!" UpperCAmelCase__ , UpperCAmelCase__ : Dict = box_size tensor[:, 0].clamp_(min=0 , max=snake_case ) tensor[:, 1].clamp_(min=0 , max=snake_case ) tensor[:, 2].clamp_(min=0 , max=snake_case ) tensor[:, 3].clamp_(min=0 , max=snake_case )
298
0
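The `create_token_type_ids_from_sequences` method in the RetriBert tokenizer above follows the standard BERT convention: zeros over `[CLS] A [SEP]` and ones over `B [SEP]`. A small illustration with made-up ids (real ids come from the vocabulary):

cls_id, sep_id = 101, 102  # illustrative special-token ids
token_ids_0 = [7, 8, 9]
token_ids_1 = [4, 5]

token_type_ids = [0] * len([cls_id] + token_ids_0 + [sep_id]) + [1] * len(token_ids_1 + [sep_id])
print(token_type_ids)  # [0, 0, 0, 0, 0, 1, 1, 1]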
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : int = 100_0000 )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : Any = 1 UpperCAmelCase__ : Optional[int] = 1 UpperCAmelCase__ : List[Any] = {1: 1} for inputa in range(2 , __SCREAMING_SNAKE_CASE ): UpperCAmelCase__ : str = 0 UpperCAmelCase__ : Dict = inputa while True: if number in counters: counter += counters[number] break if number % 2 == 0: number //= 2 counter += 1 else: UpperCAmelCase__ : Union[str, Any] = (3 * number) + 1 counter += 1 if inputa not in counters: UpperCAmelCase__ : List[Any] = counter if counter > pre_counter: UpperCAmelCase__ : Tuple = inputa UpperCAmelCase__ : Union[str, Any] = counter return largest_number if __name__ == "__main__": print(solution(int(input().strip())))
370
"""simple docstring""" import qiskit def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int )-> qiskit.result.counts.Counts: '''simple docstring''' UpperCAmelCase__ : str = qiskit.Aer.get_backend("aer_simulator" ) UpperCAmelCase__ : Optional[int] = qiskit.QuantumCircuit(4 , 2 ) # encode inputs in qubits 0 and 1 if bita == 1: qc_ha.x(0 ) if bita == 1: qc_ha.x(1 ) qc_ha.barrier() # use cnots to write XOR of the inputs on qubit2 qc_ha.cx(0 , 2 ) qc_ha.cx(1 , 2 ) # use ccx / toffoli gate to write AND of the inputs on qubit3 qc_ha.ccx(0 , 1 , 3 ) qc_ha.barrier() # extract outputs qc_ha.measure(2 , 0 ) # extract XOR value qc_ha.measure(3 , 1 ) # extract AND value # Execute the circuit on the qasm simulator UpperCAmelCase__ : Optional[int] = qiskit.execute(snake_case , snake_case , shots=1000 ) # Return the histogram data of the results of the experiment return job.result().get_counts(snake_case ) if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = half_adder(1, 1) print(F"""Half Adder Output Qubit Counts: {counts}""")
298
0
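As a sanity check on the memoized Collatz solver above: the chain starting at 13 has 10 terms (13 → 40 → 20 → 10 → 5 → 16 → 8 → 4 → 2 → 1). A tiny unmemoized reference for spot-checking:

def collatz_chain_length(n: int) -> int:
    # straightforward, unmemoized chain length, for verifying the fast solver above
    length = 1
    while n != 1:
        n = n // 2 if n % 2 == 0 else 3 * n + 1
        length += 1
    return length

assert collatz_chain_length(13) == 10
# the known answer below one million (Project Euler 14) is 837799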
"""simple docstring""" from __future__ import annotations import collections import tempfile import unittest import numpy as np from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import is_tf_available, is_vision_available from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask from ..bert.test_modeling_tf_bert import TFBertModelTester from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester from ..deit.test_modeling_tf_deit import TFDeiTModelTester from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester from ..vit.test_modeling_tf_vit import TFViTModelTester if is_tf_available(): from transformers import ( TFBertModel, TFCLIPVisionModel, TFDeiTModel, TFRobertaModel, TFVisionTextDualEncoderModel, TFViTModel, VisionTextDualEncoderConfig, ) if is_vision_available(): from PIL import Image from transformers import VisionTextDualEncoderProcessor def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> int: '''simple docstring''' if isinstance(UpperCAmelCase__ , collections.abc.Iterable ): return x return (x, x) @require_tf class lowerCAmelCase__ : def __a ( self : Optional[int] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): '''simple docstring''' pass def __a ( self : Dict ): '''simple docstring''' pass def __a ( self : int ): '''simple docstring''' pass def __a ( self : Any , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Optional[Any] , snake_case__ : Any , snake_case__ : List[Any]=None , **snake_case__ : Any ): '''simple docstring''' UpperCAmelCase__ : Any = VisionTextDualEncoderConfig.from_vision_text_configs(lowercase_ , lowercase_ ) UpperCAmelCase__ : Dict = TFVisionTextDualEncoderModel(lowercase_ ) UpperCAmelCase__ : int = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], config.projection_dim) ) def __a ( self : Tuple , snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : List[Any]=None , **snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.get_vision_text_model(lowercase_ , lowercase_ ) UpperCAmelCase__ : Union[str, Any] = TFVisionTextDualEncoderModel(vision_model=lowercase_ , text_model=lowercase_ ) UpperCAmelCase__ : Union[str, Any] = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , (pixel_values.shape[0], model.config.projection_dim) ) def __a ( self : str , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : Optional[int] , snake_case__ : List[str]=None , **snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = self.get_vision_text_model(lowercase_ , lowercase_ ) UpperCAmelCase__ : List[str] = {"""vision_model""": vision_model, """text_model""": text_model} UpperCAmelCase__ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**lowercase_ ) UpperCAmelCase__ : Tuple = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) self.assertEqual(output["text_embeds"].shape , (input_ids.shape[0], model.config.projection_dim) ) self.assertEqual(output["image_embeds"].shape , 
(pixel_values.shape[0], model.config.projection_dim) ) def __a ( self : Dict , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Union[str, Any] , snake_case__ : str=None , **snake_case__ : Any ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.get_vision_text_model(lowercase_ , lowercase_ ) UpperCAmelCase__ : Optional[int] = TFVisionTextDualEncoderModel(vision_model=lowercase_ , text_model=lowercase_ ) UpperCAmelCase__ : int = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) UpperCAmelCase__ : str = output[0].numpy() with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(lowercase_ ) UpperCAmelCase__ : List[Any] = TFVisionTextDualEncoderModel.from_pretrained(lowercase_ ) UpperCAmelCase__ : Dict = model(input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ ) UpperCAmelCase__ : Optional[int] = after_output[0].numpy() UpperCAmelCase__ : int = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase_ , 1e-5 ) def __a ( self : Tuple , snake_case__ : Any , snake_case__ : Tuple , snake_case__ : Optional[int] , snake_case__ : int , snake_case__ : Any=None , **snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.get_vision_text_model(lowercase_ , lowercase_ ) UpperCAmelCase__ : Any = TFVisionTextDualEncoderModel(vision_model=lowercase_ , text_model=lowercase_ ) UpperCAmelCase__ : Optional[int] = model( input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , output_attentions=lowercase_ ) UpperCAmelCase__ : List[Any] = output.vision_model_output.attentions self.assertEqual(len(lowercase_ ) , vision_config.num_hidden_layers ) # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase__ : int = to_atuple(vision_model.config.image_size ) UpperCAmelCase__ : int = to_atuple(vision_model.config.patch_size ) UpperCAmelCase__ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) UpperCAmelCase__ : int = num_patches + 1 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) UpperCAmelCase__ : Optional[int] = output.text_model_output.attentions self.assertEqual(len(lowercase_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __a ( self : Union[str, Any] , snake_case__ : np.ndarray , snake_case__ : np.ndarray , snake_case__ : float ): '''simple docstring''' UpperCAmelCase__ : str = np.abs((a - b) ).max() self.assertLessEqual(lowercase_ , lowercase_ , f'Difference between torch and flax is {diff} (>= {tol}).' 
) def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_model(**lowercase_ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = self.prepare_config_and_inputs() self.check_model_from_pretrained_configs(**lowercase_ ) def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Dict = self.prepare_config_and_inputs() self.check_vision_text_dual_encoder_from_pretrained(**lowercase_ ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = self.prepare_config_and_inputs() self.check_save_load(**lowercase_ ) def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.prepare_config_and_inputs() self.check_vision_text_output_attention(**lowercase_ ) @slow def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Tuple = self.get_pretrained_model_and_inputs() UpperCAmelCase__ : List[str] = model_a(**lowercase_ ) UpperCAmelCase__ : Union[str, Any] = outputs[0].numpy() with tempfile.TemporaryDirectory() as tmp_dirname: model_a.save_pretrained(lowercase_ ) UpperCAmelCase__ : str = TFVisionTextDualEncoderModel.from_pretrained(lowercase_ ) UpperCAmelCase__ : int = model_a(**lowercase_ ) UpperCAmelCase__ : str = after_outputs[0].numpy() UpperCAmelCase__ : Dict = np.amax(np.abs(out_a - out_a ) ) self.assertLessEqual(lowercase_ , 1e-5 ) @require_tf class lowerCAmelCase__ ( _UpperCAmelCase , unittest.TestCase ): def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "hf-internal-testing/tiny-random-vit" , "hf-internal-testing/tiny-random-bert" ) UpperCAmelCase__ : Optional[int] = 1_3 UpperCAmelCase__ : Tuple = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) UpperCAmelCase__ : List[str] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) UpperCAmelCase__ : Optional[Any] = random_attention_mask([batch_size, 4] ) UpperCAmelCase__ : Any = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def __a ( self : Any , snake_case__ : Any , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Any = TFViTModel(lowercase_ , name="vision_model" ) UpperCAmelCase__ : Optional[Any] = TFBertModel(lowercase_ , name="text_model" ) return vision_model, text_model def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = TFViTModelTester(self ) UpperCAmelCase__ : Tuple = TFBertModelTester(self ) UpperCAmelCase__ : Optional[int] = vit_model_tester.prepare_config_and_inputs() UpperCAmelCase__ : Union[str, Any] = bert_model_tester.prepare_config_and_inputs() UpperCAmelCase__ : Tuple = vision_config_and_inputs ( UpperCAmelCase__ ) : Any = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class lowerCAmelCase__ ( _UpperCAmelCase , unittest.TestCase ): def __a ( self : str ): '''simple docstring''' # DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's # just reinitialize it. 
UpperCAmelCase__ : str = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-deit-tf" , "hf-internal-testing/tiny-random-roberta" ) UpperCAmelCase__ : Optional[int] = 1_3 UpperCAmelCase__ : List[Any] = floats_tensor( [ batch_size, model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) UpperCAmelCase__ : Tuple = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) UpperCAmelCase__ : Optional[Any] = random_attention_mask([batch_size, 4] ) UpperCAmelCase__ : str = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def __a ( self : int , snake_case__ : List[str] , snake_case__ : Dict , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Optional[Any]=None , **snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Any = self.get_vision_text_model(lowercase_ , lowercase_ ) UpperCAmelCase__ : Dict = TFVisionTextDualEncoderModel(vision_model=lowercase_ , text_model=lowercase_ ) UpperCAmelCase__ : int = model( input_ids=lowercase_ , pixel_values=lowercase_ , attention_mask=lowercase_ , output_attentions=lowercase_ ) UpperCAmelCase__ : str = output.vision_model_output.attentions self.assertEqual(len(lowercase_ ) , vision_config.num_hidden_layers ) # in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens) UpperCAmelCase__ : Dict = to_atuple(vision_model.config.image_size ) UpperCAmelCase__ : Tuple = to_atuple(vision_model.config.patch_size ) UpperCAmelCase__ : Optional[int] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) UpperCAmelCase__ : List[str] = num_patches + 2 self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) ) UpperCAmelCase__ : Dict = output.text_model_output.attentions self.assertEqual(len(lowercase_ ) , text_config.num_hidden_layers ) self.assertEqual( text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , ) def __a ( self : List[str] , snake_case__ : List[str] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = TFDeiTModel(lowercase_ , name="vision_model" ) UpperCAmelCase__ : Tuple = TFRobertaModel(lowercase_ , name="text_model" ) return vision_model, text_model def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = TFDeiTModelTester(self ) UpperCAmelCase__ : int = TFRobertaModelTester(self ) UpperCAmelCase__ : Optional[int] = vit_model_tester.prepare_config_and_inputs() UpperCAmelCase__ : int = bert_model_tester.prepare_config_and_inputs() UpperCAmelCase__ : int = vision_config_and_inputs ( UpperCAmelCase__ ) : Tuple = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_tf class lowerCAmelCase__ ( _UpperCAmelCase , unittest.TestCase ): def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[str] = TFVisionTextDualEncoderModel.from_vision_text_pretrained( "Rocketknight1/tiny-random-clip-tf" , "hf-internal-testing/tiny-random-bert" ) UpperCAmelCase__ : List[Any] = 1_3 UpperCAmelCase__ : Optional[int] = floats_tensor( [ batch_size, 
model.vision_model.config.num_channels, model.vision_model.config.image_size, model.vision_model.config.image_size, ] ) UpperCAmelCase__ : int = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size ) UpperCAmelCase__ : Any = random_attention_mask([batch_size, 4] ) UpperCAmelCase__ : Optional[int] = {"""pixel_values""": pixel_values, """input_ids""": input_ids, """attention_mask""": attention_mask} return model, inputs def __a ( self : Dict , snake_case__ : str , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = TFCLIPVisionModel(lowercase_ , name="vision_model" ) UpperCAmelCase__ : Dict = TFBertModel(lowercase_ , name="text_model" ) return vision_model, text_model def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = TFCLIPVisionModelTester(self ) UpperCAmelCase__ : Union[str, Any] = TFBertModelTester(self ) UpperCAmelCase__ : int = clip_model_tester.prepare_config_and_inputs() UpperCAmelCase__ : Dict = bert_model_tester.prepare_config_and_inputs() UpperCAmelCase__ : int = vision_config_and_inputs ( UpperCAmelCase__ ) : Tuple = text_config_and_inputs return { "text_config": text_config, "vision_config": vision_config, "pixel_values": pixel_values, "attention_mask": input_mask, "input_ids": input_ids, "text_token_type_ids": token_type_ids, "text_sequence_labels": sequence_labels, "text_token_labels": token_labels, "text_choice_labels": choice_labels, } @require_vision @require_tf class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Tuple = TFVisionTextDualEncoderModel.from_pretrained( "clip-italian/clip-italian" , logit_scale_init_value=1.0 , from_pt=lowercase_ ) UpperCAmelCase__ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained("clip-italian/clip-italian" ) UpperCAmelCase__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) UpperCAmelCase__ : Any = processor( text=["una foto di un gatto", "una foto di un cane"] , images=lowercase_ , padding=lowercase_ , return_tensors="np" ) UpperCAmelCase__ : Dict = model(**lowercase_ ) # verify the logits self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) ) self.assertEqual( outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , ) UpperCAmelCase__ : List[Any] = np.array([[1.228_4727, 0.310_4122]] ) self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , lowercase_ , atol=1e-3 ) )
371
"""simple docstring""" from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) _lowerCAmelCase : Union[str, Any] = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''efficientformer''' def __init__( self : List[Any] , snake_case__ : List[int] = [3, 2, 6, 4] , snake_case__ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , snake_case__ : List[bool] = [True, True, True, True] , snake_case__ : int = 4_4_8 , snake_case__ : int = 3_2 , snake_case__ : int = 4 , snake_case__ : int = 7 , snake_case__ : int = 5 , snake_case__ : int = 8 , snake_case__ : int = 4 , snake_case__ : float = 0.0 , snake_case__ : int = 1_6 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : float = 0.0 , snake_case__ : int = 1 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : float = 1e-5 , snake_case__ : str = "gelu" , snake_case__ : float = 0.02 , snake_case__ : float = 1e-12 , snake_case__ : int = 2_2_4 , snake_case__ : float = 1e-05 , **snake_case__ : str , ): '''simple docstring''' super().__init__(**snake_case__ ) UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : List[str] = hidden_sizes UpperCAmelCase__ : Union[str, Any] = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : List[Any] = initializer_range UpperCAmelCase__ : List[Any] = layer_norm_eps UpperCAmelCase__ : Optional[int] = patch_size UpperCAmelCase__ : Tuple = num_channels UpperCAmelCase__ : Optional[int] = depths UpperCAmelCase__ : Union[str, Any] = mlp_expansion_ratio UpperCAmelCase__ : Dict = downsamples UpperCAmelCase__ : Any = dim UpperCAmelCase__ : str = key_dim UpperCAmelCase__ : List[Any] = attention_ratio UpperCAmelCase__ : Optional[Any] = resolution UpperCAmelCase__ : Optional[Any] = pool_size UpperCAmelCase__ : Any = downsample_patch_size UpperCAmelCase__ : int = downsample_stride UpperCAmelCase__ : Dict = downsample_pad UpperCAmelCase__ : List[Any] = drop_path_rate UpperCAmelCase__ : Optional[Any] = num_metaad_blocks UpperCAmelCase__ : List[str] = distillation UpperCAmelCase__ : Dict = use_layer_scale UpperCAmelCase__ : List[Any] = layer_scale_init_value UpperCAmelCase__ : Optional[Any] = image_size UpperCAmelCase__ : Optional[int] = batch_norm_eps
298
0
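The dual-encoder tests above ultimately check that `logits_per_image` has shape (num_images, num_texts); those logits are scaled cosine similarities between the projected embeddings. A numpy sketch of that contraction (dimensions illustrative, not from the tests):

import numpy as np

image_embeds = np.random.randn(2, 512)  # 2 images projected into the shared space
text_embeds = np.random.randn(3, 512)   # 3 texts
logit_scale = np.exp(1.0)               # learned temperature, stored as a log in the model

image_embeds /= np.linalg.norm(image_embeds, axis=-1, keepdims=True)
text_embeds /= np.linalg.norm(text_embeds, axis=-1, keepdims=True)

logits_per_image = logit_scale * image_embeds @ text_embeds.T
print(logits_per_image.shape)  # (2, 3); logits_per_text is the transpose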
"""simple docstring""" from transformers import DistilBertTokenizer, DistilBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from ..bert.test_tokenization_bert import BertTokenizationTest @require_tokenizers class lowerCAmelCase__ ( __lowercase ): SCREAMING_SNAKE_CASE_ =DistilBertTokenizer SCREAMING_SNAKE_CASE_ =DistilBertTokenizerFast SCREAMING_SNAKE_CASE_ =True @slow def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = DistilBertTokenizer.from_pretrained("distilbert-base-uncased" ) UpperCAmelCase__ : Optional[Any] = tokenizer.encode("sequence builders" , add_special_tokens=_a ) UpperCAmelCase__ : List[Any] = tokenizer.encode("multi-sequence build" , add_special_tokens=_a ) UpperCAmelCase__ : Any = tokenizer.build_inputs_with_special_tokens(_a ) UpperCAmelCase__ : int = tokenizer.build_inputs_with_special_tokens(_a , _a ) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [ tokenizer.sep_token_id ]
350
"""simple docstring""" import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def SCREAMING_SNAKE_CASE__ ( snake_case : Dataset , snake_case : Dict[str, str] )-> Any: '''simple docstring''' UpperCAmelCase__ : str = args.log_outputs UpperCAmelCase__ : str = "_".join(args.dataset.split("/" ) + [args.config, args.split] ) # load metric UpperCAmelCase__ : List[str] = load_metric("wer" ) UpperCAmelCase__ : Tuple = load_metric("cer" ) # compute metrics UpperCAmelCase__ : List[str] = wer.compute(references=result["target"] , predictions=result["prediction"] ) UpperCAmelCase__ : Tuple = cer.compute(references=result["target"] , predictions=result["prediction"] ) # print & log results UpperCAmelCase__ : Union[str, Any] = f'WER: {wer_result}\nCER: {cer_result}' print(snake_case ) with open(f'{dataset_id}_eval_results.txt' , "w" ) as f: f.write(snake_case ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: UpperCAmelCase__ : str = f'log_{dataset_id}_predictions.txt' UpperCAmelCase__ : List[str] = f'log_{dataset_id}_targets.txt' with open(snake_case , "w" ) as p, open(snake_case , "w" ) as t: # mapping function to write output def write_to_file(snake_case : List[Any] , snake_case : List[str] ): p.write(f'{i}' + "\n" ) p.write(batch["prediction"] + "\n" ) t.write(f'{i}' + "\n" ) t.write(batch["target"] + "\n" ) result.map(snake_case , with_indices=snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> str: '''simple docstring''' UpperCAmelCase__ : str = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training UpperCAmelCase__ : str = re.sub(snake_case , "" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
UpperCAmelCase__ : Tuple = ["\n\n", "\n", " ", " "] for t in token_sequences_to_ignore: UpperCAmelCase__ : List[Any] = " ".join(text.split(snake_case ) ) return text def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor UpperCAmelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(args.model_id ) UpperCAmelCase__ : str = feature_extractor.sampling_rate # resample audio UpperCAmelCase__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case ) ) # load eval pipeline if args.device is None: UpperCAmelCase__ : List[str] = 0 if torch.cuda.is_available() else -1 UpperCAmelCase__ : Optional[int] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(snake_case : Any ): UpperCAmelCase__ : List[str] = asr( batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) UpperCAmelCase__ : List[Any] = prediction["text"] UpperCAmelCase__ : Optional[int] = normalize_text(batch["sentence"] ) return batch # run inference on all examples UpperCAmelCase__ : Dict = dataset.map(snake_case , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case , snake_case ) if __name__ == "__main__": _lowerCAmelCase : Any = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) _lowerCAmelCase : Tuple = parser.parse_args() main(args)
298
0
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> str: '''simple docstring''' if a < 0: raise ValueError("Input value must be a positive integer" ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): raise TypeError("Input value must be a 'int' type" ) return bin(__lowerCamelCase ).count("1" ) if __name__ == "__main__": import doctest doctest.testmod()
351
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowerCAmelCase__ : def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str=1_0_0 , snake_case__ : str=1_3 , snake_case__ : Optional[int]=3_0 , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]=True , snake_case__ : Any=3_2 , snake_case__ : List[str]=4 , snake_case__ : Any=4 , snake_case__ : Dict=3_7 , snake_case__ : str="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[Any]=1_0 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : Tuple=None , snake_case__ : Tuple=[0, 1, 2, 3] , ): '''simple docstring''' UpperCAmelCase__ : int = parent UpperCAmelCase__ : List[str] = 1_0_0 UpperCAmelCase__ : List[Any] = batch_size UpperCAmelCase__ : int = image_size UpperCAmelCase__ : List[Any] = patch_size UpperCAmelCase__ : List[Any] = num_channels UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : str = use_labels UpperCAmelCase__ : Any = hidden_size UpperCAmelCase__ : Dict = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Any = hidden_act UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : str = attention_probs_dropout_prob UpperCAmelCase__ : Optional[int] = type_sequence_label_size UpperCAmelCase__ : Any = initializer_range UpperCAmelCase__ : Any = scope UpperCAmelCase__ : Optional[Any] = out_indices UpperCAmelCase__ : int = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase__ : List[Any] = (image_size // patch_size) ** 2 UpperCAmelCase__ : Optional[int] = num_patches + 1 def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : str = None UpperCAmelCase__ : Optional[int] = None if self.use_labels: UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) UpperCAmelCase__ : Tuple = self.get_config() return config, pixel_values, labels, pixel_labels def __a ( self : int ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads 
, intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def __a ( self : int , snake_case__ : str , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Any ): '''simple docstring''' UpperCAmelCase__ : int = BeitForMaskedImageModeling(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[Any] = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def __a ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase__ : Any = 1 UpperCAmelCase__ : List[Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase__ : Optional[Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.num_labels UpperCAmelCase__ : int = BeitForSemanticSegmentation(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = model(snake_case__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = config_and_inputs UpperCAmelCase__ : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( { '''feature-extraction''': BeitModel, '''image-classification''': BeitForImageClassification, '''image-segmentation''': BeitForSemanticSegmentation, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False 
SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitModelTester(self ) UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 ) def __a ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def __a ( self : List[Any] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def __a ( self : List[str] ): '''simple docstring''' pass def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Dict = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase__ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(snake_case__ ) UpperCAmelCase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : str = [*signature.parameters.keys()] UpperCAmelCase__ : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' if not self.model_tester.is_training: return UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Optional[int] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]: continue UpperCAmelCase__ : Optional[Any] = model_class(snake_case__ ) model.to(snake_case__ ) model.train() UpperCAmelCase__ : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Tuple = model(**snake_case__ ).loss loss.backward() def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : List[str] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(snake_case__ ), 
BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue UpperCAmelCase__ : List[Any] = model_class(snake_case__ ) model.gradient_checkpointing_enable() model.to(snake_case__ ) model.train() UpperCAmelCase__ : Dict = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Optional[Any] = model(**snake_case__ ).loss loss.backward() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Union[str, Any] = _config_zero_init(snake_case__ ) for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(config=snake_case__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) @slow def __a ( self : Any ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Optional[Any] = BeitModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def __a ( self : Union[str, Any] ): '''simple docstring''' return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(snake_case__ ) UpperCAmelCase__ : int = self.default_image_processor UpperCAmelCase__ : List[Any] = prepare_img() UpperCAmelCase__ : Dict = image_processor(images=snake_case__ , return_tensors="pt" ).pixel_values.to(snake_case__ ) # prepare bool_masked_pos UpperCAmelCase__ : Union[str, Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ ) UpperCAmelCase__ : str = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 1_9_6, 8_1_9_2) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Any = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) ) @slow def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Dict = prepare_img() UpperCAmelCase__ : Tuple = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Union[str, Any] = model(**snake_case__ ) UpperCAmelCase__ : Any = outputs.logits # verify the logits UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1_0_0_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Optional[Any] 
= torch.tensor([-1.2385, -1.0987, -1.0108] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : List[str] = 2_8_1 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Any = prepare_img() UpperCAmelCase__ : Union[str, Any] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[Any] = model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 2_1_8_4_1) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : int = torch.tensor([1.6881, -0.2787, 0.5901] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : Any = 2_3_9_6 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : List[Any] = model.to(snake_case__ ) UpperCAmelCase__ : int = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Any = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : List[Any] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : str = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[str] = model(**snake_case__ ) UpperCAmelCase__ : Dict = outputs.logits # verify the logits UpperCAmelCase__ : Any = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : List[str] = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: UpperCAmelCase__ : Optional[Any] = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=snake_case__ , ) else: UpperCAmelCase__ : int = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=snake_case__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 ) ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : Any = model.to(snake_case__ ) UpperCAmelCase__ : Dict = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : Optional[int] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : Optional[int] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] 
= model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits.detach().cpu() UpperCAmelCase__ : str = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(5_0_0, 3_0_0)] ) UpperCAmelCase__ : List[Any] = torch.Size((5_0_0, 3_0_0) ) self.assertEqual(segmentation[0].shape , snake_case__ ) UpperCAmelCase__ : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ ) UpperCAmelCase__ : int = torch.Size((1_6_0, 1_6_0) ) self.assertEqual(segmentation[0].shape , snake_case__ )
298
0
"""simple docstring""" import os import random import sys from . import cryptomath_module as cryptoMath # noqa: N812 from . import rabin_miller as rabinMiller # noqa: N812 def SCREAMING_SNAKE_CASE__ ( )-> None: '''simple docstring''' print("Making key files..." ) make_key_files("rsa" , 1024 ) print("Key files generation successful." ) def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> tuple[tuple[int, int], tuple[int, int]]: '''simple docstring''' print("Generating prime p..." ) UpperCAmelCase__ : Dict = rabinMiller.generate_large_prime(_snake_case ) print("Generating prime q..." ) UpperCAmelCase__ : Optional[int] = rabinMiller.generate_large_prime(_snake_case ) UpperCAmelCase__ : Dict = p * q print("Generating e that is relatively prime to (p - 1) * (q - 1)..." ) while True: UpperCAmelCase__ : Optional[int] = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) ) if cryptoMath.gcd(_snake_case , (p - 1) * (q - 1) ) == 1: break print("Calculating d that is mod inverse of e..." ) UpperCAmelCase__ : Any = cryptoMath.find_mod_inverse(_snake_case , (p - 1) * (q - 1) ) UpperCAmelCase__ : Optional[Any] = (n, e) UpperCAmelCase__ : Any = (n, d) return (public_key, private_key) def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : int )-> None: '''simple docstring''' if os.path.exists(f'{name}_pubkey.txt' ) or os.path.exists(f'{name}_privkey.txt' ): print("\nWARNING:" ) print( f'\"{name}_pubkey.txt\" or \"{name}_privkey.txt\" already exists. \n' "Use a different name or delete these files and re-run this program." ) sys.exit() UpperCAmelCase__ : Union[str, Any] = generate_key(_snake_case ) print(f'\nWriting public key to file {name}_pubkey.txt...' ) with open(f'{name}_pubkey.txt' , "w" ) as out_file: out_file.write(f'{key_size},{public_key[0]},{public_key[1]}' ) print(f'Writing private key to file {name}_privkey.txt...' ) with open(f'{name}_privkey.txt' , "w" ) as out_file: out_file.write(f'{key_size},{private_key[0]},{private_key[1]}' ) if __name__ == "__main__": main()
352
"""simple docstring""" import functools def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str )-> int: '''simple docstring''' UpperCAmelCase__ : List[str] = len(snake_case ) UpperCAmelCase__ : str = len(snake_case ) @functools.cache def min_distance(snake_case : int , snake_case : int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa UpperCAmelCase__ : Optional[int] = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , snake_case ) , 1 + min_distance(snake_case , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
298
0
"""simple docstring""" _lowerCAmelCase : Union[str, Any] = 0 # The first color of the flag. _lowerCAmelCase : Optional[int] = 1 # The second color of the flag. _lowerCAmelCase : int = 2 # The third color of the flag. _lowerCAmelCase : Any = (red, white, blue) def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> Optional[int]: '''simple docstring''' if not sequence: return [] if len(_A ) == 1: return list(_A ) UpperCAmelCase__ : Optional[int] = 0 UpperCAmelCase__ : List[str] = len(_A ) - 1 UpperCAmelCase__ : Optional[Any] = 0 while mid <= high: if sequence[mid] == colors[0]: UpperCAmelCase__ : Tuple = sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: UpperCAmelCase__ : Tuple = sequence[high], sequence[mid] high -= 1 else: UpperCAmelCase__ : Optional[int] = f'The elements inside the sequence must contains only {colors} values' raise ValueError(_A ) return sequence if __name__ == "__main__": import doctest doctest.testmod() _lowerCAmelCase : str = input("""Enter numbers separated by commas:\n""").strip() _lowerCAmelCase : Dict = [int(item.strip()) for item in user_input.split(""",""")] print(F"""{dutch_national_flag_sort(unsorted)}""")
353
"""simple docstring""" import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class lowerCAmelCase__ ( __magic_name__ ): def __a ( self : List[Any] , snake_case__ : str ): '''simple docstring''' with open(snake_case__ , encoding="utf-8" ) as input_file: UpperCAmelCase__ : List[Any] = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" ) UpperCAmelCase__ : Tuple = input_file.read() UpperCAmelCase__ : Tuple = regexp.search(snake_case__ ) return match def __a ( self : List[str] , snake_case__ : str ): '''simple docstring''' with open(snake_case__ , encoding="utf-8" ) as input_file: UpperCAmelCase__ : Union[str, Any] = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL ) UpperCAmelCase__ : Dict = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` UpperCAmelCase__ : int = regexp.finditer(snake_case__ ) UpperCAmelCase__ : Dict = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = Path("./datasets" ) UpperCAmelCase__ : Any = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(snake_case__ ) ): raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}' ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = Path("./datasets" ) UpperCAmelCase__ : int = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_print_statements(str(snake_case__ ) ): raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.' )
298
0
"""simple docstring""" import numpy as np from numpy import ndarray from scipy.optimize import Bounds, LinearConstraint, minimize def SCREAMING_SNAKE_CASE__ ( snake_case : ndarray )-> float: '''simple docstring''' return np.dot(snake_case , snake_case ) class lowerCAmelCase__ : def __init__( self : Optional[Any] , *, snake_case__ : float = np.inf , snake_case__ : str = "linear" , snake_case__ : float = 0.0 , ): '''simple docstring''' UpperCAmelCase__ : Any = regularization UpperCAmelCase__ : List[str] = gamma if kernel == "linear": UpperCAmelCase__ : Optional[int] = self.__linear elif kernel == "rbf": if self.gamma == 0: raise ValueError("rbf kernel requires gamma" ) if not isinstance(self.gamma , (float, int) ): raise ValueError("gamma must be float or int" ) if not self.gamma > 0: raise ValueError("gamma must be > 0" ) UpperCAmelCase__ : Optional[int] = self.__rbf # in the future, there could be a default value like in sklearn # sklear: def_gamma = 1/(n_features * X.var()) (wiki) # previously it was 1/(n_features) else: UpperCAmelCase__ : Optional[int] = f'Unknown kernel: {kernel}' raise ValueError(__UpperCAmelCase ) def __a ( self : str , snake_case__ : ndarray , snake_case__ : ndarray ): '''simple docstring''' return np.dot(__UpperCAmelCase , __UpperCAmelCase ) def __a ( self : Optional[Any] , snake_case__ : ndarray , snake_case__ : ndarray ): '''simple docstring''' return np.exp(-(self.gamma * norm_squared(vectora - vectora )) ) def __a ( self : int , snake_case__ : list[ndarray] , snake_case__ : ndarray ): '''simple docstring''' UpperCAmelCase__ : List[str] = observations UpperCAmelCase__ : Tuple = classes # using Wolfe's Dual to calculate w. # Primal problem: minimize 1/2*norm_squared(w) # constraint: yn(w . xn + b) >= 1 # # With l a vector # Dual problem: maximize sum_n(ln) - # 1/2 * sum_n(sum_m(ln*lm*yn*ym*xn . xm)) # constraint: self.C >= ln >= 0 # and sum_n(ln*yn) = 0 # Then we get w using w = sum_n(ln*yn*xn) # At the end we can get b ~= mean(yn - w . xn) # # Since we use kernels, we only need l_star to calculate b # and to classify observations ((UpperCAmelCase__ ) , ) : List[Any] = np.shape(__UpperCAmelCase ) def to_minimize(snake_case__ : ndarray ) -> float: UpperCAmelCase__ : Optional[Any] = 0 ((UpperCAmelCase__ ) , ) : str = np.shape(__UpperCAmelCase ) for i in range(__UpperCAmelCase ): for j in range(__UpperCAmelCase ): s += ( candidate[i] * candidate[j] * classes[i] * classes[j] * self.kernel(observations[i] , observations[j] ) ) return 1 / 2 * s - sum(__UpperCAmelCase ) UpperCAmelCase__ : Tuple = LinearConstraint(__UpperCAmelCase , 0 , 0 ) UpperCAmelCase__ : List[Any] = Bounds(0 , self.regularization ) UpperCAmelCase__ : List[str] = minimize( __UpperCAmelCase , np.ones(__UpperCAmelCase ) , bounds=__UpperCAmelCase , constraints=[ly_contraint] ).x UpperCAmelCase__ : Dict = l_star # calculating mean offset of separation plane to points UpperCAmelCase__ : Union[str, Any] = 0 for i in range(__UpperCAmelCase ): for j in range(__UpperCAmelCase ): s += classes[i] - classes[i] * self.optimum[i] * self.kernel( observations[i] , observations[j] ) UpperCAmelCase__ : Union[str, Any] = s / n def __a ( self : Dict , snake_case__ : ndarray ): '''simple docstring''' UpperCAmelCase__ : Dict = sum( self.optimum[n] * self.classes[n] * self.kernel(self.observations[n] , __UpperCAmelCase ) for n in range(len(self.classes ) ) ) return 1 if s + self.offset >= 0 else -1 if __name__ == "__main__": import doctest doctest.testmod()
354
"""simple docstring""" import numpy as np import datasets _lowerCAmelCase : Optional[int] = """ Compute the Mahalanobis Distance Mahalonobis distance is the distance between a point and a distribution. And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance. It was introduced by Prof. P. C. Mahalanobis in 1936 and has been used in various statistical applications ever since [source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/] """ _lowerCAmelCase : Tuple = """\ @article{de2000mahalanobis, title={The mahalanobis distance}, author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L}, journal={Chemometrics and intelligent laboratory systems}, volume={50}, number={1}, pages={1--18}, year={2000}, publisher={Elsevier} } """ _lowerCAmelCase : Optional[int] = """ Args: X: List of datapoints to be compared with the `reference_distribution`. reference_distribution: List of datapoints from the reference distribution we want to compare to. Returns: mahalanobis: The Mahalonobis distance for each datapoint in `X`. Examples: >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\") >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]]) >>> print(results) {'mahalanobis': array([0.5])} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): def __a ( self : Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ), } ) , ) def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any ): '''simple docstring''' # convert to numpy arrays UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError("Expected `X` to be a 2D vector" ) if len(reference_distribution.shape ) != 2: raise ValueError("Expected `reference_distribution` to be a 2D vector" ) if reference_distribution.shape[0] < 2: raise ValueError( "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" ) # Get mahalanobis distance for each prediction UpperCAmelCase__ : Optional[Any] = X - np.mean(snake_case__ ) UpperCAmelCase__ : Tuple = np.cov(reference_distribution.T ) try: UpperCAmelCase__ : str = np.linalg.inv(snake_case__ ) except np.linalg.LinAlgError: UpperCAmelCase__ : Optional[Any] = np.linalg.pinv(snake_case__ ) UpperCAmelCase__ : List[Any] = np.dot(snake_case__ , snake_case__ ) UpperCAmelCase__ : Tuple = np.dot(snake_case__ , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
298
0
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int )-> Tuple: '''simple docstring''' while b: UpperCAmelCase__ : Optional[int] = b, a % b return a def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int )-> Any: '''simple docstring''' return a if b == 0 else euclidean_gcd_recursive(snake_case_ , a % b ) def SCREAMING_SNAKE_CASE__ ( )-> str: '''simple docstring''' print(f'euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}' ) print(f'euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}' ) print(f'euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}' ) print(f'euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}' ) print(f'euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}' ) print(f'euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}' ) print(f'euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}' ) print(f'euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}' ) print(f'euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}' ) print(f'euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}' ) if __name__ == "__main__": main()
355
"""simple docstring""" import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =IFPipeline SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_BATCH_PARAMS SCREAMING_SNAKE_CASE_ =PipelineTesterMixin.required_optional_params - {'''latents'''} def __a ( self : Dict ): '''simple docstring''' return self._get_dummy_components() def __a ( self : Any , snake_case__ : Dict , snake_case__ : Optional[Any]=0 ): '''simple docstring''' if str(snake_case__ ).startswith("mps" ): UpperCAmelCase__ : str = torch.manual_seed(snake_case__ ) else: UpperCAmelCase__ : Optional[int] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) UpperCAmelCase__ : Tuple = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def __a ( self : Tuple ): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def __a ( self : Tuple ): '''simple docstring''' # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def __a ( self : Dict ): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def __a ( self : int ): '''simple docstring''' self._test_save_load_local() def __a ( self : Any ): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1e-2 , ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __a ( self : Optional[Any] ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : str ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self : Tuple ): '''simple docstring''' # if UpperCAmelCase__ : Any = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa ) UpperCAmelCase__ : Union[str, Any] = IFSuperResolutionPipeline.from_pretrained( "DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=snake_case__ , tokenizer=snake_case__ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("cuda" ) UpperCAmelCase__ , UpperCAmelCase__ : Any = pipe_a.encode_prompt("anime turtle" , device="cuda" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() UpperCAmelCase__ : Tuple = None UpperCAmelCase__ : List[Any] = None pipe_a.enable_model_cpu_offload() 
pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img UpperCAmelCase__ : List[str] = IFImgaImgPipeline(**pipe_a.components ) UpperCAmelCase__ : List[str] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting UpperCAmelCase__ : List[str] = IFInpaintingPipeline(**pipe_a.components ) UpperCAmelCase__ : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def __a ( self : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[Any] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Dict = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : List[Any] = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_3 * 1_0**9 UpperCAmelCase__ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : str = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Union[str, Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def __a ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Tuple = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : str = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : 
Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 UpperCAmelCase__ : List[str] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Dict = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Optional[Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[int] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(snake_case__ ) UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : int = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : int = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : Union[str, Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 UpperCAmelCase__ : int = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Tuple = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Any: '''simple docstring''' 
torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
298
0
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available() and is_transformers_version(""">=""", """4.25.0""")): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ( VersatileDiffusionDualGuidedPipeline, VersatileDiffusionImageVariationPipeline, VersatileDiffusionPipeline, VersatileDiffusionTextToImagePipeline, ) else: from .modeling_text_unet import UNetFlatConditionModel from .pipeline_versatile_diffusion import VersatileDiffusionPipeline from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
356
"""simple docstring""" import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { """vocab_file""": """vocab.txt""", """merges_file""": """bpe.codes""", } _lowerCAmelCase : List[Any] = { """vocab_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""", }, """merges_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""", }, } _lowerCAmelCase : int = { """vinai/phobert-base""": 256, """vinai/phobert-large""": 256, } def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = set() UpperCAmelCase__ : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCAmelCase__ : Dict = char UpperCAmelCase__ : Tuple = set(snake_case ) return pairs class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : List[Any] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Tuple="<s>" , snake_case__ : List[Any]="</s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : Union[str, Any]="<s>" , snake_case__ : Any="<unk>" , snake_case__ : int="<pad>" , snake_case__ : List[str]="<mask>" , **snake_case__ : Optional[int] , ): '''simple docstring''' super().__init__( bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , ) UpperCAmelCase__ : Dict = vocab_file UpperCAmelCase__ : Tuple = merges_file UpperCAmelCase__ : List[Any] = {} UpperCAmelCase__ : Dict = 0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : Dict = 2 UpperCAmelCase__ : Dict = 3 self.add_from_file(snake_case__ ) UpperCAmelCase__ : Optional[Any] = {v: k for k, v in self.encoder.items()} with open(snake_case__ , encoding="utf-8" ) as merges_handle: UpperCAmelCase__ : Tuple = merges_handle.read().split("\n" )[:-1] UpperCAmelCase__ : Optional[Any] = [tuple(merge.split()[:-1] ) for merge in merges] UpperCAmelCase__ : List[Any] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase__ : Dict = {} def __a ( self : int , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] UpperCAmelCase__ : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __a ( self : List[str] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) if token_ids_a is None: return [1] + ([0] * len(snake_case__ )) + [1] return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1] def __a ( self : Union[str, Any] , 
snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase__ : Tuple = [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __a ( self : List[str] ): '''simple docstring''' return len(self.encoder ) def __a ( self : Any ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def __a ( self : Dict , snake_case__ : Tuple ): '''simple docstring''' if token in self.cache: return self.cache[token] UpperCAmelCase__ : Optional[Any] = tuple(snake_case__ ) UpperCAmelCase__ : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) UpperCAmelCase__ : Any = get_pairs(snake_case__ ) if not pairs: return token while True: UpperCAmelCase__ : List[Any] = min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break UpperCAmelCase__ , UpperCAmelCase__ : Tuple = bigram UpperCAmelCase__ : Optional[Any] = [] UpperCAmelCase__ : Tuple = 0 while i < len(snake_case__ ): try: UpperCAmelCase__ : Union[str, Any] = word.index(snake_case__ , snake_case__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCAmelCase__ : Dict = j if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCAmelCase__ : Dict = tuple(snake_case__ ) UpperCAmelCase__ : List[Any] = new_word if len(snake_case__ ) == 1: break else: UpperCAmelCase__ : Dict = get_pairs(snake_case__ ) UpperCAmelCase__ : List[Any] = "@@ ".join(snake_case__ ) UpperCAmelCase__ : Optional[int] = word[:-4] UpperCAmelCase__ : Union[str, Any] = word return word def __a ( self : List[Any] , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : int = re.findall(R"\S+\n?" 
, snake_case__ ) for token in words: split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) ) return split_tokens def __a ( self : Dict , snake_case__ : List[str] ): '''simple docstring''' return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) ) def __a ( self : List[Any] , snake_case__ : Any ): '''simple docstring''' return self.decoder.get(snake_case__ , self.unk_token ) def __a ( self : str , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = " ".join(snake_case__ ).replace("@@ " , "" ).strip() return out_string def __a ( self : Any , snake_case__ : str , snake_case__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(snake_case__ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return UpperCAmelCase__ : Tuple = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : str = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) if os.path.abspath(self.merges_file ) != os.path.abspath(snake_case__ ): copyfile(self.merges_file , snake_case__ ) return out_vocab_file, out_merge_file def __a ( self : List[Any] , snake_case__ : Union[str, Any] ): '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): try: with open(snake_case__ , "r" , encoding="utf-8" ) as fd: self.add_from_file(snake_case__ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' ) return UpperCAmelCase__ : Dict = f.readlines() for lineTmp in lines: UpperCAmelCase__ : Optional[int] = lineTmp.strip() UpperCAmelCase__ : Tuple = line.rfind(" " ) if idx == -1: raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" ) UpperCAmelCase__ : Any = line[:idx] UpperCAmelCase__ : str = len(self.encoder )
298
0
"""simple docstring""" from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class lowerCAmelCase__ ( __lowerCamelCase ): SCREAMING_SNAKE_CASE_ ='efficientformer' def __init__( self : str , snake_case__ : List[int] = [3, 2, 6, 4] , snake_case__ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , snake_case__ : List[bool] = [True, True, True, True] , snake_case__ : int = 4_4_8 , snake_case__ : int = 3_2 , snake_case__ : int = 4 , snake_case__ : int = 7 , snake_case__ : int = 5 , snake_case__ : int = 8 , snake_case__ : int = 4 , snake_case__ : float = 0.0 , snake_case__ : int = 1_6 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : float = 0.0 , snake_case__ : int = 1 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : float = 1e-5 , snake_case__ : str = "gelu" , snake_case__ : float = 0.02 , snake_case__ : float = 1e-12 , snake_case__ : int = 2_2_4 , snake_case__ : float = 1e-05 , **snake_case__ : Tuple , ): '''simple docstring''' super().__init__(**UpperCamelCase_ ) UpperCAmelCase__ : List[str] = hidden_act UpperCAmelCase__ : Tuple = hidden_dropout_prob UpperCAmelCase__ : Any = hidden_sizes UpperCAmelCase__ : int = num_hidden_layers UpperCAmelCase__ : Tuple = num_attention_heads UpperCAmelCase__ : Any = initializer_range UpperCAmelCase__ : List[str] = layer_norm_eps UpperCAmelCase__ : Any = patch_size UpperCAmelCase__ : Any = num_channels UpperCAmelCase__ : Optional[Any] = depths UpperCAmelCase__ : Union[str, Any] = mlp_expansion_ratio UpperCAmelCase__ : Tuple = downsamples UpperCAmelCase__ : Optional[Any] = dim UpperCAmelCase__ : List[Any] = key_dim UpperCAmelCase__ : Union[str, Any] = attention_ratio UpperCAmelCase__ : List[str] = resolution UpperCAmelCase__ : Dict = pool_size UpperCAmelCase__ : Tuple = downsample_patch_size UpperCAmelCase__ : Optional[Any] = downsample_stride UpperCAmelCase__ : Union[str, Any] = downsample_pad UpperCAmelCase__ : List[Any] = drop_path_rate UpperCAmelCase__ : Optional[Any] = num_metaad_blocks UpperCAmelCase__ : List[Any] = distillation UpperCAmelCase__ : Tuple = use_layer_scale UpperCAmelCase__ : List[Any] = layer_scale_init_value UpperCAmelCase__ : Optional[int] = image_size UpperCAmelCase__ : Dict = batch_norm_eps
357
"""simple docstring""" # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class lowerCAmelCase__ : SCREAMING_SNAKE_CASE_ =42 # setable values SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =None @classmethod def __a ( cls : Optional[int] , snake_case__ : CommonSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray ): '''simple docstring''' return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ ) @dataclass class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =42 class lowerCAmelCase__ ( __magic_name__ , __magic_name__ ): SCREAMING_SNAKE_CASE_ =[e.name for e in FlaxKarrasDiffusionSchedulers] SCREAMING_SNAKE_CASE_ =42 @property def __a ( self : Union[str, Any] ): '''simple docstring''' return True @register_to_config def __init__( self : Tuple , snake_case__ : int = 1_0_0_0 , snake_case__ : float = 0.0001 , snake_case__ : float = 0.02 , snake_case__ : str = "linear" , snake_case__ : Optional[jnp.ndarray] = None , snake_case__ : str = "fixed_small" , snake_case__ : bool = True , snake_case__ : str = "epsilon" , snake_case__ : jnp.dtype = jnp.floataa , ): '''simple docstring''' UpperCAmelCase__ : Tuple = dtype def __a ( self : Any , snake_case__ : Optional[CommonSchedulerState] = None ): '''simple docstring''' if common is None: UpperCAmelCase__ : Any = CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype ) UpperCAmelCase__ : Optional[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1] return DDPMSchedulerState.create( common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , ) def __a ( self : int , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : Optional[int] = None ): '''simple docstring''' return sample def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Tuple = () ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 UpperCAmelCase__ : Tuple = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=snake_case__ , timesteps=snake_case__ , ) def __a ( self : List[str] , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Any=None , snake_case__ : Union[str, Any]=None ): '''simple docstring''' UpperCAmelCase__ : int = state.common.alphas_cumprod[t] UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample UpperCAmelCase__ : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: UpperCAmelCase__ : Union[str, 
Any] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": UpperCAmelCase__ : int = jnp.clip(snake_case__ , a_min=1e-20 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": UpperCAmelCase__ : Union[str, Any] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) ) elif variance_type == "fixed_large": UpperCAmelCase__ : List[Any] = state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] ) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": UpperCAmelCase__ : List[str] = variance UpperCAmelCase__ : Optional[Any] = state.common.betas[t] UpperCAmelCase__ : Any = (predicted_variance + 1) / 2 UpperCAmelCase__ : Dict = frac * max_log + (1 - frac) * min_log return variance def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : int , snake_case__ : jnp.ndarray , snake_case__ : Optional[jax.random.KeyArray] = None , snake_case__ : bool = True , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = timestep if key is None: UpperCAmelCase__ : Optional[int] = jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 ) else: UpperCAmelCase__ : int = None # 1. compute alphas, betas UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t] UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) UpperCAmelCase__ : List[str] = 1 - alpha_prod_t UpperCAmelCase__ : List[str] = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": UpperCAmelCase__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": UpperCAmelCase__ : List[Any] = model_output elif self.config.prediction_type == "v_prediction": UpperCAmelCase__ : int = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` ' " for the FlaxDDPMScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: UpperCAmelCase__ : Optional[Any] = jnp.clip(snake_case__ , -1 , 1 ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t UpperCAmelCase__ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise def random_variance(): UpperCAmelCase__ : List[str] = jax.random.split(snake_case__ , num=1 ) UpperCAmelCase__ : List[str] = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype ) return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) ) UpperCAmelCase__ : Optional[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ ) def __a ( self : List[Any] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ): '''simple docstring''' return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ ) def __a ( self : Optional[int] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ): '''simple docstring''' return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ ) def __len__( self : Union[str, Any] ): '''simple docstring''' return self.config.num_train_timesteps
298
0
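The Flax scheduler defined above keeps all mutable quantities in an explicit state object, so driving it is purely functional. Below is a minimal sketch of a denoising loop, assuming the class above is the one exposed as diffusers.FlaxDDPMScheduler and using a zero tensor as a stand-in for a UNet's noise prediction; it is an illustration of the step/state API, not a complete pipeline.

import jax
import jax.numpy as jnp
from diffusers import FlaxDDPMScheduler  # assumed public name of the scheduler above

scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=10)

key = jax.random.PRNGKey(0)
sample = jax.random.normal(key, (1, 3, 8, 8))  # start from pure Gaussian noise

for t in state.timesteps:
    key, step_key = jax.random.split(key)
    model_output = jnp.zeros_like(sample)  # placeholder for unet(sample, t)
    out = scheduler.step(state, model_output, int(t), sample, key=step_key)
    sample, state = out.prev_sample, out.state  # functional update, no in-place mutation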
"""simple docstring""" from ..utils import DummyObject, requires_backends class lowerCAmelCase__ ( metaclass=lowerCAmelCase_ ): SCREAMING_SNAKE_CASE_ =['flax'] def __init__( self : Dict , *snake_case__ : Union[str, Any] , **snake_case__ : List[Any] ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : Any , *snake_case__ : int , **snake_case__ : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : Tuple , *snake_case__ : Optional[Any] , **snake_case__ : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=lowerCAmelCase_ ): SCREAMING_SNAKE_CASE_ =['flax'] def __init__( self : Any , *snake_case__ : str , **snake_case__ : Optional[int] ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : Optional[int] , *snake_case__ : Tuple , **snake_case__ : Any ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : Dict , *snake_case__ : str , **snake_case__ : str ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=lowerCAmelCase_ ): SCREAMING_SNAKE_CASE_ =['flax'] def __init__( self : Optional[Any] , *snake_case__ : Optional[Any] , **snake_case__ : Any ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : Optional[Any] , *snake_case__ : Dict , **snake_case__ : Tuple ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : Tuple , *snake_case__ : Dict , **snake_case__ : List[str] ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=lowerCAmelCase_ ): SCREAMING_SNAKE_CASE_ =['flax'] def __init__( self : str , *snake_case__ : str , **snake_case__ : Optional[Any] ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : int , *snake_case__ : Dict , **snake_case__ : Optional[int] ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : int , *snake_case__ : Optional[Any] , **snake_case__ : List[Any] ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=lowerCAmelCase_ ): SCREAMING_SNAKE_CASE_ =['flax'] def __init__( self : Tuple , *snake_case__ : Union[str, Any] , **snake_case__ : Dict ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : Tuple , *snake_case__ : Optional[int] , **snake_case__ : List[str] ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : Union[str, Any] , *snake_case__ : Optional[Any] , **snake_case__ : List[Any] ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=lowerCAmelCase_ ): SCREAMING_SNAKE_CASE_ =['flax'] def __init__( self : str , *snake_case__ : Dict , **snake_case__ : Union[str, Any] ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : Dict , *snake_case__ : int , **snake_case__ : Tuple ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : Dict , *snake_case__ : Tuple , **snake_case__ : Dict ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=lowerCAmelCase_ ): SCREAMING_SNAKE_CASE_ =['flax'] def __init__( self : Tuple , *snake_case__ : Dict , **snake_case__ : List[str] ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : Tuple , 
*snake_case__ : Union[str, Any] , **snake_case__ : List[str] ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : Union[str, Any] , *snake_case__ : List[Any] , **snake_case__ : Any ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=lowerCAmelCase_ ): SCREAMING_SNAKE_CASE_ =['flax'] def __init__( self : Any , *snake_case__ : Any , **snake_case__ : Optional[int] ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : List[Any] , *snake_case__ : int , **snake_case__ : Optional[int] ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : Tuple , *snake_case__ : Optional[int] , **snake_case__ : Optional[int] ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=lowerCAmelCase_ ): SCREAMING_SNAKE_CASE_ =['flax'] def __init__( self : Dict , *snake_case__ : str , **snake_case__ : Optional[Any] ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : Tuple , *snake_case__ : str , **snake_case__ : Any ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : Union[str, Any] , *snake_case__ : Optional[Any] , **snake_case__ : List[str] ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=lowerCAmelCase_ ): SCREAMING_SNAKE_CASE_ =['flax'] def __init__( self : Tuple , *snake_case__ : Optional[Any] , **snake_case__ : Any ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : Dict , *snake_case__ : Optional[int] , **snake_case__ : List[str] ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : Tuple , *snake_case__ : List[Any] , **snake_case__ : Optional[int] ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=lowerCAmelCase_ ): SCREAMING_SNAKE_CASE_ =['flax'] def __init__( self : Optional[int] , *snake_case__ : List[str] , **snake_case__ : Any ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : Dict , *snake_case__ : Optional[int] , **snake_case__ : Optional[Any] ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : Tuple , *snake_case__ : Union[str, Any] , **snake_case__ : Any ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=lowerCAmelCase_ ): SCREAMING_SNAKE_CASE_ =['flax'] def __init__( self : Any , *snake_case__ : Dict , **snake_case__ : Optional[int] ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : Tuple , *snake_case__ : List[Any] , **snake_case__ : str ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : int , *snake_case__ : str , **snake_case__ : int ): '''simple docstring''' requires_backends(cls , ["flax"] ) class lowerCAmelCase__ ( metaclass=lowerCAmelCase_ ): SCREAMING_SNAKE_CASE_ =['flax'] def __init__( self : Optional[Any] , *snake_case__ : List[str] , **snake_case__ : Dict ): '''simple docstring''' requires_backends(self , ["flax"] ) @classmethod def __a ( cls : Tuple , *snake_case__ : int , **snake_case__ : int ): '''simple docstring''' requires_backends(cls , ["flax"] ) @classmethod def __a ( cls : Union[str, Any] , *snake_case__ : Dict , **snake_case__ : Union[str, Any] ): '''simple docstring''' requires_backends(cls , ["flax"] )
358
"""simple docstring""" import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class lowerCAmelCase__ : def __init__( self : str , snake_case__ : Optional[Any] , snake_case__ : List[Any]=1_3 , snake_case__ : str=7 , snake_case__ : Optional[int]=6 , snake_case__ : Union[str, Any]=1_7 , snake_case__ : Optional[Any]=2_3 , snake_case__ : int=1_1 , snake_case__ : Dict=True , ): '''simple docstring''' UpperCAmelCase__ : str = parent UpperCAmelCase__ : Tuple = batch_size UpperCAmelCase__ : Dict = seq_length UpperCAmelCase__ : Union[str, Any] = act_dim UpperCAmelCase__ : Dict = state_dim UpperCAmelCase__ : Optional[Any] = hidden_size UpperCAmelCase__ : List[str] = max_length UpperCAmelCase__ : int = is_training def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) UpperCAmelCase__ : List[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) UpperCAmelCase__ : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase__ : int = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 ) UpperCAmelCase__ : Optional[int] = random_attention_mask((self.batch_size, self.seq_length) ) UpperCAmelCase__ : Optional[int] = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def __a ( self : int ): '''simple docstring''' return DecisionTransformerConfig( batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , ) def __a ( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Optional[int] , ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) self.parent.assertEqual(result.state_preds.shape , states.shape ) self.parent.assertEqual(result.action_preds.shape , actions.shape ) self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase__ : 
Optional[int] = { "states": states, "actions": actions, "rewards": rewards, "returns_to_go": returns_to_go, "timesteps": timesteps, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =(DecisionTransformerModel,) if is_torch_available() else () SCREAMING_SNAKE_CASE_ =() SCREAMING_SNAKE_CASE_ ={'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids SCREAMING_SNAKE_CASE_ =False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = DecisionTransformerModelTester(self ) UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 ) def __a ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) @slow def __a ( self : List[str] ): '''simple docstring''' for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Tuple = DecisionTransformerModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Dict = model_class(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : Tuple = [*signature.parameters.keys()] UpperCAmelCase__ : str = [ "states", "actions", "rewards", "returns_to_go", "timesteps", "attention_mask", ] self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 2 # number of steps of autoregressive prediction we will perform UpperCAmelCase__ : Tuple = 1_0 # defined by the RL environment, may be normalized UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" ) UpperCAmelCase__ : Any = model.to(snake_case__ ) UpperCAmelCase__ : Optional[int] = model.config torch.manual_seed(0 ) UpperCAmelCase__ : Optional[int] = torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ) # env.reset() UpperCAmelCase__ : Optional[Any] = torch.tensor( [[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=snake_case__ ) UpperCAmelCase__ : List[str] = torch.tensor(snake_case__ , device=snake_case__ , dtype=torch.floataa ).reshape(1 , 1 , 1 ) UpperCAmelCase__ : Union[str, Any] = state UpperCAmelCase__ : Dict = torch.zeros(1 , 0 , config.act_dim , device=snake_case__ , dtype=torch.floataa ) UpperCAmelCase__ : Any = torch.zeros(1 , 0 , 
device=snake_case__ , dtype=torch.floataa ) UpperCAmelCase__ : Optional[int] = torch.tensor(0 , device=snake_case__ , dtype=torch.long ).reshape(1 , 1 ) for step in range(snake_case__ ): UpperCAmelCase__ : List[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=snake_case__ )] , dim=1 ) UpperCAmelCase__ : Optional[int] = torch.cat([rewards, torch.zeros(1 , 1 , device=snake_case__ )] , dim=1 ) UpperCAmelCase__ : Dict = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device ) with torch.no_grad(): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = model( states=snake_case__ , actions=snake_case__ , rewards=snake_case__ , returns_to_go=snake_case__ , timesteps=snake_case__ , attention_mask=snake_case__ , return_dict=snake_case__ , ) self.assertEqual(action_pred.shape , actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = ( # env.step(action) torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ), 1.0, False, {}, ) UpperCAmelCase__ : Union[str, Any] = action_pred[0, -1] UpperCAmelCase__ : int = torch.cat([states, state] , dim=1 ) UpperCAmelCase__ : Dict = returns_to_go[0, -1] - reward UpperCAmelCase__ : Optional[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 ) UpperCAmelCase__ : Tuple = torch.cat( [timesteps, torch.ones((1, 1) , device=snake_case__ , dtype=torch.long ) * (step + 1)] , dim=1 )
298
0
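The dummy classes above exist only so that importing diffusers without flax succeeds, while any actual use of a Flax object fails with a clear message. A self-contained sketch of that pattern follows; requires_backends and DummyObject here are simplified stand-ins written for illustration, not the actual diffusers.utils implementations.

class MissingBackendError(ImportError):
    pass


def requires_backends(obj, backends):
    # Simplified stand-in: pretend every listed backend is unavailable.
    name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
    raise MissingBackendError(f"{name} requires the {backends} backend(s), which are not installed.")


class DummyObject(type):
    # Any classmethod-style access on the placeholder triggers the backend check.
    def __getattr__(cls, key):
        requires_backends(cls, cls.SCREAMING_SNAKE_CASE_)


class FlaxPlaceholder(metaclass=DummyObject):
    SCREAMING_SNAKE_CASE_ = ["flax"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["flax"])


# Both instantiation and from_pretrained-style access fail loudly:
#   FlaxPlaceholder()                  -> MissingBackendError
#   FlaxPlaceholder.from_pretrained("x") -> MissingBackendError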
"""simple docstring""" import argparse import torch from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging logging.set_verbosity_info() def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : str , snake_case : Dict )-> Optional[int]: '''simple docstring''' if openai_config_file == "": UpperCAmelCase__ : Tuple = OpenAIGPTConfig() else: UpperCAmelCase__ : Optional[Any] = OpenAIGPTConfig.from_json_file(_UpperCamelCase ) UpperCAmelCase__ : str = OpenAIGPTModel(_UpperCamelCase ) # Load weights from numpy load_tf_weights_in_openai_gpt(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) # Save pytorch-model UpperCAmelCase__ : Optional[int] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME UpperCAmelCase__ : int = pytorch_dump_folder_path + "/" + CONFIG_NAME print(f'Save PyTorch model to {pytorch_weights_dump_path}' ) torch.save(model.state_dict() , _UpperCamelCase ) print(f'Save configuration file to {pytorch_config_dump_path}' ) with open(_UpperCamelCase , "w" , encoding="utf-8" ) as f: f.write(config.to_json_string() ) if __name__ == "__main__": _lowerCAmelCase : Dict = argparse.ArgumentParser() # Required parameters parser.add_argument( """--openai_checkpoint_folder_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model.""" ) parser.add_argument( """--openai_config_file""", default="""""", type=str, help=( """An optional config json file corresponding to the pre-trained OpenAI model. \n""" """This specifies the model architecture.""" ), ) _lowerCAmelCase : int = parser.parse_args() convert_openai_checkpoint_to_pytorch( args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path )
359
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _lowerCAmelCase : Tuple = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = ["""MLukeTokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys _lowerCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
298
0
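The mluke __init__ above registers MLukeTokenizer in _import_structure and hands it to _LazyModule, so the expensive sentencepiece import only happens when the attribute is first touched. A minimal sketch of the same lazy-import idea using PEP 562 module-level __getattr__; this is illustrative, not the actual _LazyModule implementation.

import importlib

_import_structure = {"tokenization_mluke": ["MLukeTokenizer"]}
# Invert the mapping: attribute name -> submodule that defines it.
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}


def __getattr__(name):
    # Called only when `name` is not found through normal module lookup.
    if name in _attr_to_module:
        module = importlib.import_module("." + _attr_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")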
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import _LazyModule _lowerCAmelCase : Optional[Any] = {"""tokenization_bertweet""": ["""BertweetTokenizer"""]} if TYPE_CHECKING: from .tokenization_bertweet import BertweetTokenizer else: import sys _lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
360
"""simple docstring""" import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger _lowerCAmelCase : Optional[int] = get_logger(__name__) _lowerCAmelCase : Any = Path(__file__).parent / """model_card_template.md""" _lowerCAmelCase : Dict = uuida().hex _lowerCAmelCase : Optional[int] = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : Optional[int] = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : int = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/""" def SCREAMING_SNAKE_CASE__ ( snake_case : Union[Dict, str, None] = None )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'; torch/{_torch_version}' if is_flax_available(): ua += f'; jax/{_jax_version}' ua += f'; flax/{_flax_version}' if is_onnx_available(): ua += f'; onnxruntime/{_onnxruntime_version}' # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(snake_case , snake_case ): ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() ) elif isinstance(snake_case , snake_case ): ua += "; " + user_agent return ua def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> List[str]: '''simple docstring''' if token is None: UpperCAmelCase__ : Optional[Any] = HfFolder.get_token() if organization is None: UpperCAmelCase__ : Tuple = whoami(snake_case )["name"] return f'{username}/{model_id}' else: return f'{organization}/{model_id}' def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : List[Any] )-> List[Any]: '''simple docstring''' if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." 
) if hasattr(snake_case , "local_rank" ) and args.local_rank not in [-1, 0]: return UpperCAmelCase__ : int = args.hub_token if hasattr(snake_case , "hub_token" ) else None UpperCAmelCase__ : Optional[Any] = get_full_repo_name(snake_case , token=snake_case ) UpperCAmelCase__ : Tuple = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=snake_case , model_name=snake_case , repo_name=snake_case , dataset_name=args.dataset_name if hasattr(snake_case , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(snake_case , "gradient_accumulation_steps" ) else None ) , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta1" ) else None , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(snake_case , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(snake_case , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(snake_case , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(snake_case , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(snake_case , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(snake_case , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(snake_case , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , ) UpperCAmelCase__ : List[str] = os.path.join(args.output_dir , "README.md" ) model_card.save(snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] , snake_case : Optional[str] = None )-> Tuple: '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash UpperCAmelCase__ : Dict = str(Path(snake_case ).as_posix() ) UpperCAmelCase__ : Optional[int] = re.search(r"snapshots/([^/]+)/" , snake_case ) if search is None: return None UpperCAmelCase__ : Dict = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(snake_case ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. 
_lowerCAmelCase : Dict = os.path.expanduser( os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface""")) ) _lowerCAmelCase : List[Any] = os.path.join(hf_cache_home, """diffusers""") def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> None: '''simple docstring''' if new_cache_dir is None: UpperCAmelCase__ : Union[str, Any] = DIFFUSERS_CACHE if old_cache_dir is None: UpperCAmelCase__ : str = old_diffusers_cache UpperCAmelCase__ : List[str] = Path(snake_case ).expanduser() UpperCAmelCase__ : Any = Path(snake_case ).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): UpperCAmelCase__ : Dict = new_cache_dir / old_blob_path.relative_to(snake_case ) new_blob_path.parent.mkdir(parents=snake_case , exist_ok=snake_case ) os.replace(snake_case , snake_case ) try: os.symlink(snake_case , snake_case ) except OSError: logger.warning( "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). _lowerCAmelCase : Tuple = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""") if not os.path.isfile(cache_version_file): _lowerCAmelCase : Any = 0 else: with open(cache_version_file) as f: try: _lowerCAmelCase : List[str] = int(f.read()) except ValueError: _lowerCAmelCase : Optional[int] = 0 if cache_version < 1: _lowerCAmelCase : List[str] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( """The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """ """existing cached models. This is a one-time operation, you can interrupt it or run it """ """later by calling `diffusers.utils.hub_utils.move_cache()`.""" ) try: move_cache() except Exception as e: _lowerCAmelCase : Dict = """\n""".join(traceback.format_tb(e.__traceback__)) logger.error( F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """ """file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """ """message and we will do our best to help.""" ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, """w""") as f: f.write("""1""") except Exception: logger.warning( F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """ """the directory exists and can be written to.""" ) def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None )-> str: '''simple docstring''' if variant is not None: UpperCAmelCase__ : int = weights_name.split("." 
) UpperCAmelCase__ : Optional[Any] = splits[:-1] + [variant] + splits[-1:] UpperCAmelCase__ : Optional[int] = ".".join(snake_case ) return weights_name def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , *, snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : str , snake_case : List[str] , snake_case : Dict , snake_case : Any , snake_case : Any , snake_case : Tuple , snake_case : List[str] , snake_case : Any , snake_case : Optional[int]=None , )-> Tuple: '''simple docstring''' UpperCAmelCase__ : List[str] = str(snake_case ) if os.path.isfile(snake_case ): return pretrained_model_name_or_path elif os.path.isdir(snake_case ): if os.path.isfile(os.path.join(snake_case , snake_case ) ): # Load from a PyTorch checkpoint UpperCAmelCase__ : Any = os.path.join(snake_case , snake_case ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(snake_case , snake_case , snake_case ) ): UpperCAmelCase__ : str = os.path.join(snake_case , snake_case , snake_case ) return model_file else: raise EnvironmentError( f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(snake_case ).base_version ) >= version.parse("0.20.0" ) ): try: UpperCAmelCase__ : List[Any] = hf_hub_download( snake_case , filename=_add_variant(snake_case , snake_case ) , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) warnings.warn( f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , snake_case , ) return model_file except: # noqa: E722 warnings.warn( f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(snake_case , snake_case )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(snake_case , snake_case )}\' so that the correct variant file can be added.' , snake_case , ) try: # 2. 
Load model file as usual UpperCAmelCase__ : Dict = hf_hub_download( snake_case , filename=snake_case , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ' "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ' "this model name. Check the model page at " f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' ) except EntryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' ) except HTTPError as err: raise EnvironmentError( f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' ) except ValueError: raise EnvironmentError( f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it' f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a' f' directory containing a file named {weights_name} or' " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ' "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ' f'containing a file named {weights_name}' )
298
0
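The last helper in the hub utilities above splices a variant tag (for example fp16) into a weights filename just before its extension. A small worked example of that behavior; the function is renamed add_variant here only because the original underscore name is module-private.

def add_variant(weights_name, variant=None):
    # Same logic as the module's _add_variant: insert the tag before the extension.
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)
    return weights_name


assert add_variant("diffusion_pytorch_model.bin", "fp16") == "diffusion_pytorch_model.fp16.bin"
assert add_variant("model.safetensors") == "model.safetensors"  # no variant -> unchanged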
"""simple docstring""" from __future__ import annotations def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : List[Any] )-> bool: '''simple docstring''' if len(a__ ) == 0: return False UpperCAmelCase__ : Dict = len(a__ ) // 2 if a_list[midpoint] == item: return True if item < a_list[midpoint]: return binary_search(a_list[:midpoint] , a__ ) else: return binary_search(a_list[midpoint + 1 :] , a__ ) if __name__ == "__main__": _lowerCAmelCase : Any = input("""Enter numbers separated by comma:\n""").strip() _lowerCAmelCase : int = [int(item.strip()) for item in user_input.split(""",""")] _lowerCAmelCase : Any = int(input("""Enter the number to be found in the list:\n""").strip()) _lowerCAmelCase : Optional[Any] = '' if binary_search(sequence, target) else 'not ' print(F"""{target} was {not_str}found in {sequence}""")
361
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" ) UpperCAmelCase__ : int = AutoTokenizer.from_pretrained("google/mt5-small" ) UpperCAmelCase__ : Dict = tokenizer("Hello there" , return_tensors="tf" ).input_ids UpperCAmelCase__ : Union[str, Any] = tokenizer("Hi I am" , return_tensors="tf" ).input_ids UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ).loss UpperCAmelCase__ : Optional[Any] = -tf.math.reduce_mean(snake_case__ ).numpy() UpperCAmelCase__ : List[Any] = -21.22_8168 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
298
0
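The recursive binary_search above slices the list on every call, so each recursion level copies up to half of the remaining elements. An index-based variant, offered here as an alternative sketch rather than part of the original file, performs the same search without the copies.

from __future__ import annotations


def binary_search_indexed(a_list: list[int], item: int, lo: int = 0, hi: int | None = None) -> bool:
    if hi is None:
        hi = len(a_list)
    if lo >= hi:  # empty range: not found
        return False
    midpoint = (lo + hi) // 2
    if a_list[midpoint] == item:
        return True
    if item < a_list[midpoint]:
        return binary_search_indexed(a_list, item, lo, midpoint)
    return binary_search_indexed(a_list, item, midpoint + 1, hi)


assert binary_search_indexed([1, 3, 5, 7, 9], 7) is True
assert binary_search_indexed([1, 3, 5, 7, 9], 4) is False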
"""simple docstring""" # Copyright 2021 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import os from accelerate.test_utils import execute_subprocess_async def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any]=None )-> Optional[int]: '''simple docstring''' if subparsers is not None: UpperCAmelCase__ : Optional[int] = subparsers.add_parser("test" ) else: UpperCAmelCase__ : Dict = argparse.ArgumentParser("Accelerate test command" ) parser.add_argument( "--config_file" , default=lowercase_ , help=( "The path to use to store the config file. Will default to a file named default_config.yaml in the cache " "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have " "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed " "with 'huggingface'." ) , ) if subparsers is not None: parser.set_defaults(func=lowercase_ ) return parser def SCREAMING_SNAKE_CASE__ ( snake_case : int )-> int: '''simple docstring''' UpperCAmelCase__ : Tuple = os.path.sep.join(__file__.split(os.path.sep )[:-2] + ["test_utils", "scripts", "test_script.py"] ) if args.config_file is None: UpperCAmelCase__ : Any = script_name else: UpperCAmelCase__ : Optional[int] = f'--config_file={args.config_file} {script_name}' UpperCAmelCase__ : List[Any] = ["accelerate-launch"] + test_args.split() UpperCAmelCase__ : Union[str, Any] = execute_subprocess_async(lowercase_ , env=os.environ.copy() ) if result.returncode == 0: print("Test is a success! You are ready for your distributed training!" ) def SCREAMING_SNAKE_CASE__ ( )-> List[str]: '''simple docstring''' UpperCAmelCase__ : List[Any] = test_command_parser() UpperCAmelCase__ : List[Any] = parser.parse_args() test_command(lowercase_ ) if __name__ == "__main__": main()
362
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : def __init__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict=1_3 , snake_case__ : List[str]=7 , snake_case__ : Union[str, Any]=True , snake_case__ : Tuple=True , snake_case__ : Optional[int]=True , snake_case__ : Any=True , snake_case__ : Any=9_9 , snake_case__ : List[Any]=1_6 , snake_case__ : Any=3_6 , snake_case__ : Union[str, Any]=6 , snake_case__ : Tuple=6 , snake_case__ : List[str]=6 , snake_case__ : List[str]=3_7 , snake_case__ : Dict="gelu" , snake_case__ : int=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : List[str]=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : List[str]=3 , snake_case__ : Any=4 , snake_case__ : int=None , ): '''simple docstring''' UpperCAmelCase__ : Tuple = parent UpperCAmelCase__ : int = batch_size UpperCAmelCase__ : int = seq_length UpperCAmelCase__ : List[str] = is_training UpperCAmelCase__ : Union[str, Any] = use_input_mask UpperCAmelCase__ : Optional[Any] = use_token_type_ids UpperCAmelCase__ : Any = use_labels UpperCAmelCase__ : List[Any] = vocab_size UpperCAmelCase__ : Any = embedding_size UpperCAmelCase__ : List[str] = hidden_size UpperCAmelCase__ : List[Any] = num_hidden_layers UpperCAmelCase__ : int = num_hidden_groups UpperCAmelCase__ : Union[str, Any] = num_attention_heads UpperCAmelCase__ : List[str] = intermediate_size UpperCAmelCase__ : Optional[Any] = hidden_act UpperCAmelCase__ : List[Any] = hidden_dropout_prob UpperCAmelCase__ : Tuple = attention_probs_dropout_prob UpperCAmelCase__ : str = max_position_embeddings UpperCAmelCase__ : Any = type_vocab_size UpperCAmelCase__ : Union[str, Any] = type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = initializer_range UpperCAmelCase__ : Tuple = num_labels UpperCAmelCase__ : List[str] = num_choices UpperCAmelCase__ : Union[str, Any] = scope def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ : Optional[int] = None if self.use_input_mask: UpperCAmelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : Optional[int] = None if self.use_token_type_ids: UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : Any = None if self.use_labels: UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ : int = 
self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self : Any ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def __a ( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = AlbertModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ ) UpperCAmelCase__ : Optional[Any] = model(snake_case__ , token_type_ids=snake_case__ ) UpperCAmelCase__ : Optional[int] = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertForPreTraining(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , sentence_order_label=snake_case__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def __a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AlbertForMaskedLM(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertForQuestionAnswering(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[str] = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self 
: Dict , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.num_labels UpperCAmelCase__ : int = AlbertForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self : str , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : str = self.num_labels UpperCAmelCase__ : Any = AlbertForTokenClassification(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[str] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self : Any , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.num_choices UpperCAmelCase__ : Optional[Any] = AlbertForMultipleChoice(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Tuple = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[Any] = config_and_inputs UpperCAmelCase__ : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( { '''feature-extraction''': AlbertModel, '''fill-mask''': AlbertForMaskedLM, '''question-answering''': AlbertForQuestionAnswering, '''text-classification''': AlbertForSequenceClassification, '''token-classification''': AlbertForTokenClassification, '''zero-shot''': AlbertForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =True def __a ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[int]=False ): '''simple docstring''' UpperCAmelCase__ : List[str] = super()._prepare_for_class(snake_case__ , 
snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class in get_values(snake_case__ ): UpperCAmelCase__ : List[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case__ ) UpperCAmelCase__ : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = AlbertModelTester(self ) UpperCAmelCase__ : Any = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 ) def __a ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case__ ) def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase__ : Dict = type self.model_tester.create_and_check_model(*snake_case__ ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained("albert-base-v2" ) UpperCAmelCase__ : Dict = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) UpperCAmelCase__ : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ )[0] UpperCAmelCase__ : Dict = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , snake_case__ ) UpperCAmelCase__ : Union[str, Any] = torch.tensor( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1e-4 ) )
298
0
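The model testers above (DecisionTransformer, Albert) build all of their random fixtures through shared helpers such as ids_tensor, floats_tensor and random_attention_mask. A minimal sketch of what those helpers do, written for illustration rather than as the exact transformers testing implementations:

import torch


def ids_tensor(shape, vocab_size):
    # Random integer ids in [0, vocab_size), e.g. fake token ids.
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)


def floats_tensor(shape):
    # Uniform random floats, e.g. fake states/actions/rewards.
    return torch.rand(tuple(shape))


def random_attention_mask(shape):
    mask = ids_tensor(shape, vocab_size=2)
    mask[:, -1] = 1  # ensure every row attends to at least one token
    return mask


batch_size, seq_length, vocab_size = 13, 7, 99
input_ids = ids_tensor((batch_size, seq_length), vocab_size)
attention_mask = random_attention_mask((batch_size, seq_length))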
"""simple docstring""" import tempfile import unittest from pathlib import Path from shutil import copyfile from transformers import MaMaaaTokenizer, is_torch_available from transformers.testing_utils import ( get_tests_dir, nested_simplify, require_sentencepiece, require_tokenizers, require_torch, slow, ) from transformers.utils import is_sentencepiece_available if is_sentencepiece_available(): from transformers.models.mam_aaa.tokenization_mam_aaa import VOCAB_FILES_NAMES, save_json from ...test_tokenization_common import TokenizerTesterMixin if is_sentencepiece_available(): _lowerCAmelCase : Any = get_tests_dir("""fixtures/test_sentencepiece.model""") if is_torch_available(): from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right _lowerCAmelCase : List[Any] = 128_022 _lowerCAmelCase : Optional[int] = 128_028 @require_sentencepiece class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =MaMaaaTokenizer SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =True def __a ( self : Optional[Any] ): '''simple docstring''' super().setUp() UpperCAmelCase__ : int = ["</s>", "<unk>", "▁This", "▁is", "▁a", "▁t", "est", "\u0120", "<pad>"] UpperCAmelCase__ : List[Any] = dict(zip(_a , range(len(_a ) ) ) ) UpperCAmelCase__ : Optional[Any] = Path(self.tmpdirname ) save_json(_a , save_dir / VOCAB_FILES_NAMES["vocab_file"] ) if not (save_dir / VOCAB_FILES_NAMES["spm_file"]).exists(): copyfile(_a , save_dir / VOCAB_FILES_NAMES["spm_file"] ) UpperCAmelCase__ : Tuple = MaMaaaTokenizer.from_pretrained(self.tmpdirname ) tokenizer.save_pretrained(self.tmpdirname ) def __a ( self : List[str] , **snake_case__ : int ): '''simple docstring''' return MaMaaaTokenizer.from_pretrained(self.tmpdirname , **_a ) def __a ( self : Any , snake_case__ : Union[str, Any] ): '''simple docstring''' return ( "This is a test", "This is a test", ) def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = "</s>" UpperCAmelCase__ : int = 0 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_a ) , _a ) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_a ) , _a ) def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : int = self.get_tokenizer() UpperCAmelCase__ : Optional[int] = list(tokenizer.get_vocab().keys() ) self.assertEqual(vocab_keys[0] , "</s>" ) self.assertEqual(vocab_keys[1] , "<unk>" ) self.assertEqual(vocab_keys[-1] , "<s>" ) self.assertEqual(len(_a ) , tokenizer.vocab_size + len(tokenizer.get_added_vocab() ) ) @unittest.skip("Skip this test while all models are still to be uploaded." 
) def __a ( self : List[str] ): '''simple docstring''' pass def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.get_tokenizer() UpperCAmelCase__ : str = tokenizer.tokenize("This is a test" ) self.assertListEqual(_a , ["▁This", "▁is", "▁a", "▁t", "est"] ) self.assertListEqual( tokenizer.convert_tokens_to_ids(_a ) , [2, 3, 4, 5, 6] , ) UpperCAmelCase__ : Optional[int] = tokenizer.convert_ids_to_tokens([2, 3, 4, 5, 6] ) self.assertListEqual(_a , ["▁This", "▁is", "▁a", "▁t", "est"] ) UpperCAmelCase__ : Tuple = tokenizer.convert_tokens_to_string(_a ) self.assertEqual(_a , "This is a test" ) @slow def __a ( self : Tuple ): '''simple docstring''' # fmt: off UpperCAmelCase__ : List[Any] = {"input_ids": [[1_2_8_0_2_2, 1_1_0_1_0_8, 3_9_7, 1_1, 3_8_2_7_2, 2_2_4_7, 1_2_4_8_1_1, 2_8_5, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 3_9_5_3_4, 4_4_2_8, 3_9_7, 1_0_1_9, 1_8_1_0_5, 1_5_8_6, 2_0_7, 7, 4_1_3_3_7, 1_6_7_8_6, 2_4_1, 7, 2_0_2_1_4, 1_7, 1_2_5_6_9_0, 1_0_3_9_8, 7, 4_4_3_7_8, 5_8_0_6_9, 6_8_3_4_2, 7_7_9_8, 7_3_4_3, 1_1, 2_9_9, 3_3_3_1_0, 4, 1_5_8, 3_7_3_5_0, 9_4_0_7_7, 4_5_6_9, 2_9_9, 3_3_3_1_0, 9_0, 4, 5_2_8_4_0, 2_9_0, 4, 3_1_2_7_0, 1_1_2, 2_9_9, 6_8_2, 4, 5_2_8_4_0, 3_9_9_5_3, 1_4_0_7_9, 1_9_3, 5_2_5_1_9, 9_0_8_9_4, 1_7_8_9_4, 1_2_0_6_9_7, 1_1, 4_0_4_4_5, 5_5_1, 1_7, 1_0_1_9, 5_2_5_1_9, 9_0_8_9_4, 1_7_7_5_6, 9_6_3, 1_1, 4_0_4_4_5, 4_8_0, 1_7, 9_7_9_2, 1_1_2_0, 5_1_7_3, 1_3_9_3, 6_2_4_0, 1_6_7_8_6, 2_4_1, 1_2_0_9_9_6, 2_8, 1_2_4_5, 1_3_9_3, 1_1_8_2_4_0, 1_1_1_2_3, 1_0_1_9, 9_3_6_1_2, 2_6_9_1, 1_0_6_1_8, 9_8_0_5_8, 1_2_0_4_0_9, 1_9_2_8, 2_7_9, 4, 4_0_6_8_3, 3_6_7, 1_7_8, 2_0_7, 1_0_1_9, 1_0_3, 1_0_3_1_2_1, 5_0_6, 6_5_2_9_6, 5, 2], [1_2_8_0_2_2, 2_1_2_1_7, 3_6_7, 1_1_7, 1_2_5_4_5_0, 1_2_8, 7_1_9, 7, 7_3_0_8, 4_0, 9_3_6_1_2, 1_2_6_6_9, 1_1_1_6, 1_6_7_0_4, 7_1, 1_7_7_8_5, 3_6_9_9, 1_5_5_9_2, 3_5, 1_4_4, 9_5_8_4, 2_4_1, 1_1_9_4_3, 7_1_3, 9_5_0, 7_9_9, 2_2_4_7, 8_8_4_2_7, 1_5_0, 1_4_9, 1_1_8_8_1_3, 1_2_0_7_0_6, 1_0_1_9, 1_0_6_9_0_6, 8_1_5_1_8, 2_8, 1_2_2_4, 2_2_7_9_9, 3_9_7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1_2_8_0_2_2, 1_6_5_8, 1_2_3_3_1_1, 5_1_5_5, 5_5_7_8, 4_7_2_2, 2_7_9, 1_4_9_4_7, 2_3_6_6, 1_1_2_0, 1_1_9_7, 1_4, 1_3_4_8, 9_2_3_2, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501 # fmt: on self.tokenizer_integration_test_util( expected_encoding=_a , model_name="facebook/m2m100_418M" , revision="c168bae485c864188cf9aa0e4108b0b6934dc91e" , ) @require_torch @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): SCREAMING_SNAKE_CASE_ ='''facebook/m2m100_418M''' SCREAMING_SNAKE_CASE_ =[ '''In my opinion, there are two levels of response from the French government.''', '''NSA Affair Emphasizes Complete Lack of Debate on Intelligence''', ] SCREAMING_SNAKE_CASE_ =[ '''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''', '''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''', ] # fmt: off SCREAMING_SNAKE_CASE_ =[EN_CODE, 593, 1949, 115781, 4, 71586, 4234, 60633, 126233, 432, 123808, 15592, 1197, 117132, 120618, 5, 2] @classmethod def __a ( cls : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = MaMaaaTokenizer.from_pretrained( cls.checkpoint_name , src_lang="en" , tgt_lang="fr" ) UpperCAmelCase__ : Tuple = 1 return cls def __a ( self : Union[str, Any] ): '''simple docstring''' self.assertEqual(self.tokenizer.get_lang_id("ar" ) , 1_2_8_0_0_6 ) self.assertEqual(self.tokenizer.get_lang_id("en" ) , 1_2_8_0_2_2 ) self.assertEqual(self.tokenizer.get_lang_id("ro" ) , 1_2_8_0_7_6 ) self.assertEqual(self.tokenizer.get_lang_id("mr" ) , 1_2_8_0_6_3 ) def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.tokenizer.get_vocab() self.assertEqual(len(_a ) , self.tokenizer.vocab_size ) self.assertEqual(vocab["<unk>"] , 3 ) self.assertIn(self.tokenizer.get_lang_token("en" ) , _a ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = "en" UpperCAmelCase__ : str = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0] self.assertListEqual(self.expected_src_tokens , _a ) def __a ( self : str ): '''simple docstring''' self.assertIn(_a , self.tokenizer.all_special_ids ) # fmt: off UpperCAmelCase__ : List[Any] = [FR_CODE, 5_3_6_4, 8_2, 8_6_4_2, 4, 2_9_4, 4_7, 8, 1_4_0_2_8, 1_3_6, 3_2_8_6, 9_7_0_6, 6, 9_0_7_9_7, 6, 1_4_4_0_1_2, 1_6_2, 8_8_1_2_8, 3_0_0_6_1, 5, 2] # fmt: on UpperCAmelCase__ : Tuple = self.tokenizer.decode(_a , skip_special_tokens=_a ) UpperCAmelCase__ : Dict = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_a ) self.assertEqual(_a , _a ) self.assertNotIn(self.tokenizer.eos_token , _a ) def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = tempfile.mkdtemp() UpperCAmelCase__ : List[str] = self.tokenizer.lang_token_to_id self.tokenizer.save_pretrained(_a ) UpperCAmelCase__ : int = MaMaaaTokenizer.from_pretrained(_a ) self.assertDictEqual(new_tok.lang_token_to_id , _a ) @require_torch def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = "en" UpperCAmelCase__ : int = "fr" UpperCAmelCase__ : Dict = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=_a , return_tensors="pt" ) UpperCAmelCase__ : List[str] = shift_tokens_right( batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.eos_token_id ) for k in batch: UpperCAmelCase__ : Optional[int] = batch[k].tolist() # batch = {k: v.tolist() for k,v in batch.items()} # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4 # batch.decoder_inputs_ids[0][0] == assert batch.input_ids[1][0] == EN_CODE assert batch.input_ids[1][-1] == 2 assert 
batch.labels[1][0] == FR_CODE assert batch.labels[1][-1] == 2 assert batch.decoder_input_ids[1][:2] == [2, FR_CODE] @require_torch def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Any = "mr" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) UpperCAmelCase__ : Dict = "zh" self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) @require_torch def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = "mr" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("mr" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) UpperCAmelCase__ : Optional[Any] = "zh" self.tokenizer._switch_to_target_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id("zh" )] ) self.assertListEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] ) self.tokenizer._switch_to_input_mode() self.assertListEqual(self.tokenizer.prefix_tokens , [self.tokenizer.get_lang_id(self.tokenizer.src_lang )] ) @require_torch def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : int = self.tokenizer._build_translation_inputs("A test" , return_tensors="pt" , src_lang="en" , tgt_lang="ar" ) self.assertEqual( nested_simplify(_a ) , { # en_XX, A, test, EOS "input_ids": [[1_2_8_0_2_2, 5_8, 4_1_8_3, 2]], "attention_mask": [[1, 1, 1, 1]], # ar_AR "forced_bos_token_id": 1_2_8_0_0_6, } , )
363
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Any )-> Any: '''simple docstring''' UpperCAmelCase__ : List[str] = [1] for i in range(2 , snake_case ): factorials.append(factorials[-1] * i ) assert 0 <= k < factorials[-1] * n, "k out of bounds" UpperCAmelCase__ : Union[str, Any] = [] UpperCAmelCase__ : str = list(range(snake_case ) ) # Find permutation while factorials: UpperCAmelCase__ : str = factorials.pop() UpperCAmelCase__ , UpperCAmelCase__ : int = divmod(snake_case , snake_case ) permutation.append(elements[number] ) elements.remove(elements[number] ) permutation.append(elements[0] ) return permutation if __name__ == "__main__": import doctest doctest.testmod()
298
0
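# Illustrative standalone sketch (not part of the dataset record above): the
# k-th permutation routine works in the factorial number system — each digit
# of k in mixed radix (n-1)!, (n-2)!, ..., 1! selects the next remaining
# element. Names (k, n, elements) are chosen for this example only; the
# cross-check against itertools.permutations confirms the decomposition.
from itertools import permutations


def kth_permutation_demo(k: int, n: int) -> list[int]:
    factorials = [1]
    for i in range(2, n):
        factorials.append(factorials[-1] * i)
    elements, result = list(range(n)), []
    while factorials:
        index, k = divmod(k, factorials.pop())
        result.append(elements.pop(index))
    result.append(elements[0])
    return result


assert kth_permutation_demo(5, 4) == list(sorted(permutations(range(4)))[5])  # [0, 3, 2, 1]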
"""simple docstring""" _lowerCAmelCase : Union[str, Any] = 0 # The first color of the flag. _lowerCAmelCase : int = 1 # The second color of the flag. _lowerCAmelCase : int = 2 # The third color of the flag. _lowerCAmelCase : Tuple = (red, white, blue) def SCREAMING_SNAKE_CASE__ ( snake_case : list )-> Optional[Any]: '''simple docstring''' if not sequence: return [] if len(_a ) == 1: return list(_a ) UpperCAmelCase__ : Any = 0 UpperCAmelCase__ : str = len(_a ) - 1 UpperCAmelCase__ : Any = 0 while mid <= high: if sequence[mid] == colors[0]: UpperCAmelCase__ : int = sequence[mid], sequence[low] low += 1 mid += 1 elif sequence[mid] == colors[1]: mid += 1 elif sequence[mid] == colors[2]: UpperCAmelCase__ : Union[str, Any] = sequence[high], sequence[mid] high -= 1 else: UpperCAmelCase__ : Dict = f'The elements inside the sequence must contains only {colors} values' raise ValueError(_a ) return sequence if __name__ == "__main__": import doctest doctest.testmod() _lowerCAmelCase : Union[str, Any] = input("""Enter numbers separated by commas:\n""").strip() _lowerCAmelCase : Union[str, Any] = [int(item.strip()) for item in user_input.split(""",""")] print(F"""{dutch_national_flag_sort(unsorted)}""")
364
"""simple docstring""" import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: _lowerCAmelCase : Union[str, Any] = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class lowerCAmelCase__ ( unittest.TestCase ): def __init__( self : Dict , snake_case__ : Optional[int] , snake_case__ : List[str]=7 , snake_case__ : int=3 , snake_case__ : Any=1_8 , snake_case__ : List[Any]=3_0 , snake_case__ : int=4_0_0 , snake_case__ : Dict=None , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=None , ): '''simple docstring''' UpperCAmelCase__ : Dict = size if size is not None else {"height": 2_0, "width": 2_0} UpperCAmelCase__ : List[str] = parent UpperCAmelCase__ : List[str] = batch_size UpperCAmelCase__ : Optional[Any] = num_channels UpperCAmelCase__ : Any = image_size UpperCAmelCase__ : int = min_resolution UpperCAmelCase__ : Tuple = max_resolution UpperCAmelCase__ : Optional[int] = size UpperCAmelCase__ : Optional[int] = do_normalize UpperCAmelCase__ : str = do_convert_rgb UpperCAmelCase__ : Dict = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6] UpperCAmelCase__ : Union[str, Any] = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6} def __a ( self : str ): '''simple docstring''' return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg" UpperCAmelCase__ : List[str] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("RGB" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : int = PixaStructImageProcessingTester(self ) @property def __a ( self : Optional[int] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , "do_normalize" ) ) self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.image_processor_tester.prepare_dummy_image() UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict ) UpperCAmelCase__ : Dict = 2_0_4_8 UpperCAmelCase__ : int = image_processor(snake_case__ , return_tensors="pt" , max_patches=snake_case__ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) ) def __a ( self : List[Any] ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : 
Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : List[Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : str = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : List[Any] ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 UpperCAmelCase__ : Optional[int] = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(snake_case__ ): UpperCAmelCase__ : List[Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches UpperCAmelCase__ : Optional[Any] = "Hello" UpperCAmelCase__ : int = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : Dict = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : Dict ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , np.ndarray ) UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : Dict = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : List[str] = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : Optional[int] ): 
'''simple docstring''' # Initialize image_processor UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , torch.Tensor ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : int = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : str = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = PixaStructImageProcessingTester(self , num_channels=4 ) UpperCAmelCase__ : Optional[int] = 3 @property def __a ( self : int ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , "do_normalize" ) ) self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) ) def __a ( self : int ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : str = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : Optional[int] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : Dict = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
298
0
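# Hedged usage sketch for the three-way partition in the record above: one
# pass, constant extra space. The input list and expected output are invented
# for the example; the function name is for illustration only.
def dnf_sort_demo(sequence: list) -> list:
    low, mid, high = 0, 0, len(sequence) - 1
    while mid <= high:
        if sequence[mid] == 0:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low, mid = low + 1, mid + 1
        elif sequence[mid] == 1:
            mid += 1
        else:  # assumes only 0/1/2 appear; the full version raises otherwise
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
    return sequence


assert dnf_sort_demo([2, 0, 1, 2, 0]) == [0, 0, 1, 2, 2]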
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class lowerCAmelCase__ : @staticmethod def __a ( *snake_case__ : List[Any] , **snake_case__ : Tuple ): '''simple docstring''' pass @is_pipeline_test @require_vision @require_timm @require_torch class lowerCAmelCase__ ( unittest.TestCase ): SCREAMING_SNAKE_CASE_ =MODEL_FOR_OBJECT_DETECTION_MAPPING def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = ObjectDetectionPipeline(model=UpperCAmelCase_ , image_processor=UpperCAmelCase_ ) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def __a ( self : Any , snake_case__ : int , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png" , threshold=0.0 ) self.assertGreater(len(UpperCAmelCase_ ) , 0 ) for detected_object in outputs: self.assertEqual( UpperCAmelCase_ , { "score": ANY(UpperCAmelCase_ ), "label": ANY(UpperCAmelCase_ ), "box": {"xmin": ANY(UpperCAmelCase_ ), "ymin": ANY(UpperCAmelCase_ ), "xmax": ANY(UpperCAmelCase_ ), "ymax": ANY(UpperCAmelCase_ )}, } , ) import datasets UpperCAmelCase__ : List[str] = datasets.load_dataset("hf-internal-testing/fixtures_image_utils" , "image" , split="test" ) UpperCAmelCase__ : Optional[int] = [ Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ), "http://images.cocodataset.org/val2017/000000039769.jpg", # RGBA dataset[0]["file"], # LA dataset[1]["file"], # L dataset[2]["file"], ] UpperCAmelCase__ : int = object_detector(UpperCAmelCase_ , threshold=0.0 ) self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) ) for outputs in batch_outputs: self.assertGreater(len(UpperCAmelCase_ ) , 0 ) for detected_object in outputs: self.assertEqual( UpperCAmelCase_ , { "score": ANY(UpperCAmelCase_ ), "label": ANY(UpperCAmelCase_ ), "box": {"xmin": ANY(UpperCAmelCase_ ), "ymin": ANY(UpperCAmelCase_ ), "xmax": ANY(UpperCAmelCase_ ), "ymax": ANY(UpperCAmelCase_ )}, } , ) @require_tf @unittest.skip("Object detection not implemented in TF" ) def __a ( self : Dict ): '''simple docstring''' pass @require_torch def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : int = "hf-internal-testing/tiny-detr-mobilenetsv3" UpperCAmelCase__ : Dict = AutoModelForObjectDetection.from_pretrained(UpperCAmelCase_ ) UpperCAmelCase__ : List[str] = AutoFeatureExtractor.from_pretrained(UpperCAmelCase_ ) UpperCAmelCase__ : str = ObjectDetectionPipeline(model=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ ) UpperCAmelCase__ : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}}, ] , ) UpperCAmelCase__ : Any = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", 
"http://images.cocodataset.org/val2017/000000039769.jpg", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}}, ], [ {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}}, {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}}, ], ] , ) @require_torch @slow def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = "facebook/detr-resnet-50" UpperCAmelCase__ : Tuple = AutoModelForObjectDetection.from_pretrained(UpperCAmelCase_ ) UpperCAmelCase__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(UpperCAmelCase_ ) UpperCAmelCase__ : Tuple = ObjectDetectionPipeline(model=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ ) UpperCAmelCase__ : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {"score": 0.9982, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}}, ] , ) UpperCAmelCase__ : List[Any] = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}}, ], ] , ) @require_torch @slow def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : int = "facebook/detr-resnet-50" UpperCAmelCase__ : List[str] = pipeline("object-detection" , model=UpperCAmelCase_ ) UpperCAmelCase__ : Optional[Any] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {"score": 0.9982, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 
3_6_8, "ymax": 1_8_7}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}}, ] , ) UpperCAmelCase__ : List[str] = object_detector( [ "http://images.cocodataset.org/val2017/000000039769.jpg", "http://images.cocodataset.org/val2017/000000039769.jpg", ] ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ [ {"score": 0.9982, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}}, ], [ {"score": 0.9982, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}}, {"score": 0.9960, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}}, {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}}, {"score": 0.9988, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}}, ], ] , ) @require_torch @slow def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = 0.9985 UpperCAmelCase__ : Optional[Any] = "facebook/detr-resnet-50" UpperCAmelCase__ : int = pipeline("object-detection" , model=UpperCAmelCase_ ) UpperCAmelCase__ : str = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=UpperCAmelCase_ ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {"score": 0.9988, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}}, {"score": 0.9987, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}}, ] , ) @require_torch @require_pytesseract @slow def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = "Narsil/layoutlmv3-finetuned-funsd" UpperCAmelCase__ : Dict = 0.9993 UpperCAmelCase__ : List[str] = pipeline("object-detection" , model=UpperCAmelCase_ , threshold=UpperCAmelCase_ ) UpperCAmelCase__ : Tuple = object_detector( "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" ) self.assertEqual( nested_simplify(UpperCAmelCase_ , decimals=4 ) , [ {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 2_9_4, "ymin": 2_5_4, "xmax": 3_4_3, "ymax": 2_6_4}}, {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 2_9_4, "ymin": 2_5_4, "xmax": 3_4_3, "ymax": 2_6_4}}, ] , )
365
"""simple docstring""" import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> Any: '''simple docstring''' assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE__ ( )-> List[Any]: '''simple docstring''' assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE__ ( )-> Optional[int]: '''simple docstring''' UpperCAmelCase__ : int = "mock-s3-bucket" UpperCAmelCase__ : Any = f's3://{mock_bucket}' UpperCAmelCase__ : Tuple = extract_path_from_uri(snake_case ) assert dataset_path.startswith("s3://" ) is False UpperCAmelCase__ : str = "./local/path" UpperCAmelCase__ : Union[str, Any] = extract_path_from_uri(snake_case ) assert dataset_path == new_dataset_path def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case ) assert is_remote is True UpperCAmelCase__ : str = fsspec.filesystem("file" ) UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case ) assert is_remote is False @pytest.mark.parametrize("compression_fs_class" , snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Any , snake_case : List[str] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : int )-> int: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file} UpperCAmelCase__ : Dict = input_paths[compression_fs_class.protocol] if input_path is None: UpperCAmelCase__ : Optional[Any] = f'for \'{compression_fs_class.protocol}\' compression protocol, ' if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case ) UpperCAmelCase__ : Optional[Any] = fsspec.filesystem(compression_fs_class.protocol , fo=snake_case ) assert isinstance(snake_case , snake_case ) UpperCAmelCase__ : Union[str, Any] = os.path.basename(snake_case ) UpperCAmelCase__ : Optional[int] = expected_filename[: expected_filename.rindex("." 
)] assert fs.glob("*" ) == [expected_filename] with fs.open(snake_case , "r" , encoding="utf-8" ) as f, open(snake_case , encoding="utf-8" ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("protocol" , ["zip", "gzip"] ) def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : Dict , snake_case : Tuple )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[str] = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path} UpperCAmelCase__ : int = compressed_file_paths[protocol] UpperCAmelCase__ : Any = "dataset.jsonl" UpperCAmelCase__ : Any = f'{protocol}://{member_file_path}::{compressed_file_path}' UpperCAmelCase__ , *UpperCAmelCase__ : Optional[int] = fsspec.get_fs_token_paths(snake_case ) assert fs.isfile(snake_case ) assert not fs.isfile("non_existing_" + member_file_path ) @pytest.mark.integration def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Dict , snake_case : Dict , snake_case : Dict )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = hf_api.dataset_info(snake_case , token=snake_case ) UpperCAmelCase__ : str = HfFileSystem(repo_info=snake_case , token=snake_case ) assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"] assert hffs.isdir("data" ) assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" ) with open(snake_case ) as f: assert hffs.open("data/text_data.txt" , "r" ).read() == f.read() def SCREAMING_SNAKE_CASE__ ( )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ : Tuple = "bz2" # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(snake_case , snake_case , clobber=snake_case ) with pytest.warns(snake_case ) as warning_info: importlib.reload(datasets.filesystems ) assert len(snake_case ) == 1 assert ( str(warning_info[0].message ) == f'A filesystem protocol was already set for {protocol} and will be overwritten.' )
298
0
"""simple docstring""" import jax.numpy as jnp from ...utils import logging from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel from .configuration_mta import MTaConfig _lowerCAmelCase : Dict = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = """T5Config""" def SCREAMING_SNAKE_CASE__ ( snake_case : jnp.array , snake_case : int , snake_case : int )-> jnp.ndarray: UpperCAmelCase__ : Tuple = jnp.zeros_like(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase__ : Optional[int] = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1] ) UpperCAmelCase__ : int = shifted_input_ids.at[:, 0].set(SCREAMING_SNAKE_CASE_ ) UpperCAmelCase__ : Tuple = jnp.where(shifted_input_ids == -100 , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) return shifted_input_ids class lowerCAmelCase__ ( lowerCamelCase_ ): SCREAMING_SNAKE_CASE_ ="""mt5""" SCREAMING_SNAKE_CASE_ =MTaConfig class lowerCAmelCase__ ( lowerCamelCase_ ): SCREAMING_SNAKE_CASE_ ="""mt5""" SCREAMING_SNAKE_CASE_ =MTaConfig class lowerCAmelCase__ ( lowerCamelCase_ ): SCREAMING_SNAKE_CASE_ ="""mt5""" SCREAMING_SNAKE_CASE_ =MTaConfig
366
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : List[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[Any] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''megatron-bert''' def __init__( self : Optional[Any] , snake_case__ : Dict=2_9_0_5_6 , snake_case__ : Optional[int]=1_0_2_4 , snake_case__ : int=2_4 , snake_case__ : str=1_6 , snake_case__ : Optional[Any]=4_0_9_6 , snake_case__ : List[str]="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : Tuple=5_1_2 , snake_case__ : str=2 , snake_case__ : List[Any]=0.02 , snake_case__ : Any=1e-12 , snake_case__ : Any=0 , snake_case__ : str="absolute" , snake_case__ : Optional[Any]=True , **snake_case__ : int , ): '''simple docstring''' super().__init__(pad_token_id=snake_case__ , **snake_case__ ) UpperCAmelCase__ : str = vocab_size UpperCAmelCase__ : str = hidden_size UpperCAmelCase__ : List[str] = num_hidden_layers UpperCAmelCase__ : Optional[int] = num_attention_heads UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Tuple = hidden_dropout_prob UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob UpperCAmelCase__ : Any = max_position_embeddings UpperCAmelCase__ : Dict = type_vocab_size UpperCAmelCase__ : Optional[int] = initializer_range UpperCAmelCase__ : int = layer_norm_eps UpperCAmelCase__ : Optional[Any] = position_embedding_type UpperCAmelCase__ : Any = use_cache
298
0
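# Small numpy sketch of what shift_tokens_right in the record above computes:
# decoder inputs are the labels shifted one position right, the start token
# goes in front, and any -100 (ignored-label marker) is replaced by the pad
# id. Written with numpy instead of jax.numpy for portability; the token ids
# are invented for illustration.
import numpy as np


def shift_tokens_right_demo(input_ids, pad_token_id, decoder_start_token_id):
    shifted = np.zeros_like(input_ids)
    shifted[:, 1:] = input_ids[:, :-1]  # shift everything right by one
    shifted[:, 0] = decoder_start_token_id  # start token in front
    return np.where(shifted == -100, pad_token_id, shifted)  # mask out -100


labels = np.array([[11, 12, 13, -100]])
print(shift_tokens_right_demo(labels, pad_token_id=0, decoder_start_token_id=2))
# -> [[ 2 11 12 13]]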
"""simple docstring""" from __future__ import annotations from itertools import permutations from random import randint from timeit import repeat def SCREAMING_SNAKE_CASE__ ( )-> tuple[list[int], int]: '''simple docstring''' UpperCAmelCase__ : str = [randint(-1000 , 1000 ) for i in range(10 )] UpperCAmelCase__ : int = randint(-5000 , 5000 ) return (arr, r) _lowerCAmelCase : List[str] = make_dataset() def SCREAMING_SNAKE_CASE__ ( snake_case : list[int] , snake_case : int )-> tuple[int, ...]: '''simple docstring''' for triplet in permutations(UpperCamelCase__ , 3 ): if sum(UpperCamelCase__ ) == target: return tuple(sorted(UpperCamelCase__ ) ) return (0, 0, 0) def SCREAMING_SNAKE_CASE__ ( snake_case : list[int] , snake_case : int )-> tuple[int, int, int]: '''simple docstring''' arr.sort() UpperCAmelCase__ : int = len(UpperCamelCase__ ) for i in range(n - 1 ): UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = i + 1, n - 1 while left < right: if arr[i] + arr[left] + arr[right] == target: return (arr[i], arr[left], arr[right]) elif arr[i] + arr[left] + arr[right] < target: left += 1 elif arr[i] + arr[left] + arr[right] > target: right -= 1 return (0, 0, 0) def SCREAMING_SNAKE_CASE__ ( )-> tuple[float, float]: '''simple docstring''' UpperCAmelCase__ : int = "\nfrom __main__ import dataset, triplet_sum1, triplet_sum2\n" UpperCAmelCase__ : Tuple = "\ntriplet_sum1(*dataset)\n" UpperCAmelCase__ : int = "\ntriplet_sum2(*dataset)\n" UpperCAmelCase__ : str = repeat(setup=UpperCamelCase__ , stmt=UpperCamelCase__ , repeat=5 , number=1_0000 ) UpperCAmelCase__ : Dict = repeat(setup=UpperCamelCase__ , stmt=UpperCamelCase__ , repeat=5 , number=1_0000 ) return (min(UpperCamelCase__ ), min(UpperCamelCase__ )) if __name__ == "__main__": from doctest import testmod testmod() _lowerCAmelCase : List[str] = solution_times() print(F"""The time for naive implementation is {times[0]}.""") print(F"""The time for optimized implementation is {times[1]}.""")
367
"""simple docstring""" import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class lowerCAmelCase__ ( datasets.BeamBasedBuilder ): def __a ( self : Dict ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=snake_case__ , ) def __a ( self : int , snake_case__ : str , snake_case__ : List[str] ): '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )] def __a ( self : Any , snake_case__ : str , snake_case__ : str ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(snake_case__ ) class lowerCAmelCase__ ( datasets.BeamBasedBuilder ): def __a ( self : Any ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=snake_case__ , ) def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : int ): '''simple docstring''' return [ datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} ) ] def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Any ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Dict: '''simple docstring''' return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )] def SCREAMING_SNAKE_CASE__ ( )-> List[Any]: '''simple docstring''' return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )] class lowerCAmelCase__ ( __magic_name__ ): @require_beam def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : List[Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) ) UpperCAmelCase__ : Tuple = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] ) self.assertDictEqual( dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset @require_beam def __a ( self : Dict ): '''simple docstring''' import apache_beam as beam UpperCAmelCase__ : Dict = beam.io.parquetio.WriteToParquet UpperCAmelCase__ : List[str] = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Union[str, Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock: UpperCAmelCase__ : List[Any] = partial(snake_case__ , num_shards=2 ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( snake_case__ , builder.name , 
"default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) ) self.assertTrue( os.path.exists( os.path.join( snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) ) UpperCAmelCase__ : Dict = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset @require_beam def __a ( self : str ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Optional[Any] = DummyBeamDataset(cache_dir=snake_case__ ) self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare ) @require_beam def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = len(get_test_nested_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : List[Any] = NestedBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) ) self.assertDictEqual( builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) ) UpperCAmelCase__ : Tuple = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] ) self.assertDictEqual( dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset
298
0
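# Quick check of the two-pointer triplet search from the record above on a
# hand-picked array (values are arbitrary). After sorting, fixing arr[i] and
# walking left/right pointers inward gives O(n^2) total work instead of the
# O(n^3) permutation scan.
def triplet_demo(arr, target):
    arr = sorted(arr)
    for i in range(len(arr) - 2):
        left, right = i + 1, len(arr) - 1
        while left < right:
            s = arr[i] + arr[left] + arr[right]
            if s == target:
                return (arr[i], arr[left], arr[right])
            # move the pointer that brings the sum toward the target
            left, right = (left + 1, right) if s < target else (left, right - 1)
    return (0, 0, 0)


assert triplet_demo([12, 3, 5, 2, -4, 1], 0) == (-4, 1, 3)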
"""simple docstring""" from .dependency_versions_table import deps from .utils.versions import require_version, require_version_core # define which module versions we always want to check at run time # (usually the ones defined in `install_requires` in setup.py) # # order specific notes: # - tqdm must be checked before tokenizers _lowerCAmelCase : Any = [ """python""", """tqdm""", """regex""", """requests""", """packaging""", """filelock""", """numpy""", """tokenizers""", """huggingface-hub""", """safetensors""", """accelerate""", """pyyaml""", ] for pkg in pkgs_to_check_at_runtime: if pkg in deps: if pkg == "tokenizers": # must be loaded here, or else tqdm check may fail from .utils import is_tokenizers_available if not is_tokenizers_available(): continue # not required, check version only if installed elif pkg == "accelerate": # must be loaded here, or else tqdm check may fail from .utils import is_accelerate_available # Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of # Transformers with PyTorch if not is_accelerate_available(): continue # not required, check version only if installed require_version_core(deps[pkg]) else: raise ValueError(F"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""") def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple=None )-> List[Any]: '''simple docstring''' require_version(deps[pkg] , __lowerCamelCase )
368
"""simple docstring""" import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =XLMTokenizer SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase__ : Optional[int] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] UpperCAmelCase__ : Any = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase__ : Tuple = ["l o 123", "lo w 1456", "e r</w> 1789", ""] UpperCAmelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" ) as fp: fp.write(json.dumps(snake_case__ ) ) with open(self.merges_file , "w" ) as fp: fp.write("\n".join(snake_case__ ) ) def __a ( self : Union[str, Any] , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = "lower newer" UpperCAmelCase__ : Optional[Any] = "lower newer" return input_text, output_text def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = XLMTokenizer(self.vocab_file , self.merges_file ) UpperCAmelCase__ : List[Any] = "lower" UpperCAmelCase__ : Any = ["low", "er</w>"] UpperCAmelCase__ : Any = tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) UpperCAmelCase__ : Optional[Any] = tokens + ["<unk>"] UpperCAmelCase__ : List[Any] = [1_4, 1_5, 2_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" ) UpperCAmelCase__ : str = tokenizer.encode("sequence builders" , add_special_tokens=snake_case__ ) UpperCAmelCase__ : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case__ ) UpperCAmelCase__ : Any = tokenizer.build_inputs_with_special_tokens(snake_case__ ) UpperCAmelCase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
298
0
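# Minimal sketch of the runtime check the dependency snippet above performs,
# written against the public `packaging` and `importlib.metadata` APIs rather
# than transformers' private helpers (the helper name check_dep is invented
# here): parse a "pkg>=x.y" spec and compare it with the installed version.
import importlib.metadata

from packaging.requirements import Requirement


def check_dep(spec: str) -> None:
    req = Requirement(spec)
    installed = importlib.metadata.version(req.name)
    if not req.specifier.contains(installed, prereleases=True):
        raise ImportError(f"{req.name}=={installed} does not satisfy {spec}")


check_dep("packaging>=20.0")  # raises ImportError on a version mismatch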
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "junnyu/roformer_chinese_small": "https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json",
    "junnyu/roformer_chinese_base": "https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json",
    "junnyu/roformer_chinese_char_small": (
        "https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"
    ),
    "junnyu/roformer_chinese_char_base": (
        "https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"
    ),
    "junnyu/roformer_small_discriminator": (
        "https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"
    ),
    "junnyu/roformer_small_generator": (
        "https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"
    ),
    # See all RoFormer models at https://huggingface.co/models?filter=roformer
}


class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache


class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
369
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class lowerCAmelCase__ : def __init__( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : str=sys.maxsize ): '''simple docstring''' UpperCAmelCase__ : Any = "bilinear" UpperCAmelCase__ : Any = max_size UpperCAmelCase__ : Any = short_edge_length def __call__( self : Dict , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = [] for img in imgs: UpperCAmelCase__ , UpperCAmelCase__ : int = img.shape[:2] # later: provide list and randomly choose index for resize UpperCAmelCase__ : Dict = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img UpperCAmelCase__ : Dict = size * 1.0 / min(snake_case__ , snake_case__ ) if h < w: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = size, scale * w else: UpperCAmelCase__ , UpperCAmelCase__ : int = scale * h, size if max(snake_case__ , snake_case__ ) > self.max_size: UpperCAmelCase__ : Union[str, Any] = self.max_size * 1.0 / max(snake_case__ , snake_case__ ) UpperCAmelCase__ : List[str] = newh * scale UpperCAmelCase__ : int = neww * scale UpperCAmelCase__ : List[Any] = int(neww + 0.5 ) UpperCAmelCase__ : Optional[Any] = int(newh + 0.5 ) if img.dtype == np.uinta: UpperCAmelCase__ : Any = Image.fromarray(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) UpperCAmelCase__ : Optional[int] = np.asarray(snake_case__ ) else: UpperCAmelCase__ : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw UpperCAmelCase__ : Tuple = nn.functional.interpolate( snake_case__ , (newh, neww) , mode=self.interp_method , align_corners=snake_case__ ).squeeze(0 ) img_augs.append(snake_case__ ) return img_augs class lowerCAmelCase__ : def __init__( self : Optional[int] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) UpperCAmelCase__ : Any = cfg.INPUT.FORMAT UpperCAmelCase__ : Optional[Any] = cfg.SIZE_DIVISIBILITY UpperCAmelCase__ : str = cfg.PAD_VALUE UpperCAmelCase__ : List[Any] = cfg.INPUT.MAX_SIZE_TEST UpperCAmelCase__ : Dict = cfg.MODEL.DEVICE UpperCAmelCase__ : Optional[int] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase__ : str = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase__ : List[str] = lambda snake_case__ : (x - self.pixel_mean) / self.pixel_std def __a ( self : Optional[int] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = tuple(max(snake_case__ ) for s in zip(*[img.shape for img in images] ) ) UpperCAmelCase__ : Tuple = [im.shape[-2:] for im in images] UpperCAmelCase__ : int = [ nn.functional.pad( snake_case__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(snake_case__ , snake_case__ ) ] return torch.stack(snake_case__ ), torch.tensor(snake_case__ ) def __call__( self : str , snake_case__ : int , snake_case__ : int=False ): '''simple docstring''' with torch.no_grad(): if not isinstance(snake_case__ , snake_case__ ): UpperCAmelCase__ : Dict = [images] if single_image: assert len(snake_case__ ) == 1 for i in range(len(snake_case__ ) ): if 
isinstance(images[i] , torch.Tensor ): images.insert(snake_case__ , images.pop(snake_case__ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( snake_case__ , torch.as_tensor(img_tensorize(images.pop(snake_case__ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge UpperCAmelCase__ : Optional[Any] = torch.tensor([im.shape[:2] for im in images] ) UpperCAmelCase__ : Tuple = self.aug(snake_case__ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic UpperCAmelCase__ : Optional[int] = [self.normalizer(snake_case__ ) for x in images] # now pad them to do the following operations UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.pad(snake_case__ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad UpperCAmelCase__ : Tuple = torch.true_divide(snake_case__ , snake_case__ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : str )-> List[Any]: '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple[int, int] )-> int: '''simple docstring''' assert torch.isfinite(snake_case ).all(), "Box tensor contains infinite or NaN!" UpperCAmelCase__ , UpperCAmelCase__ : Dict = box_size tensor[:, 0].clamp_(min=0 , max=snake_case ) tensor[:, 1].clamp_(min=0 , max=snake_case ) tensor[:, 2].clamp_(min=0 , max=snake_case ) tensor[:, 3].clamp_(min=0 , max=snake_case )
298
0
"""simple docstring""" import argparse import logging import pickle from collections import Counter logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""", datefmt="""%m/%d/%Y %H:%M:%S""", level=logging.INFO ) _lowerCAmelCase : Optional[Any] = logging.getLogger(__name__) if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = argparse.ArgumentParser( description="""Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)""" ) parser.add_argument( """--data_file""", type=str, default="""data/dump.bert-base-uncased.pickle""", help="""The binarized dataset.""" ) parser.add_argument( """--token_counts_dump""", type=str, default="""data/token_counts.bert-base-uncased.pickle""", help="""The dump file.""" ) parser.add_argument("""--vocab_size""", default=30_522, type=int) _lowerCAmelCase : Tuple = parser.parse_args() logger.info(F"""Loading data from {args.data_file}""") with open(args.data_file, """rb""") as fp: _lowerCAmelCase : Dict = pickle.load(fp) logger.info("""Counting occurrences for MLM.""") _lowerCAmelCase : Tuple = Counter() for tk_ids in data: counter.update(tk_ids) _lowerCAmelCase : List[Any] = [0] * args.vocab_size for k, v in counter.items(): _lowerCAmelCase : int = v logger.info(F"""Dump to {args.token_counts_dump}""") with open(args.token_counts_dump, """wb""") as handle: pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
370
"""simple docstring""" import qiskit def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int )-> qiskit.result.counts.Counts: '''simple docstring''' UpperCAmelCase__ : str = qiskit.Aer.get_backend("aer_simulator" ) UpperCAmelCase__ : Optional[int] = qiskit.QuantumCircuit(4 , 2 ) # encode inputs in qubits 0 and 1 if bita == 1: qc_ha.x(0 ) if bita == 1: qc_ha.x(1 ) qc_ha.barrier() # use cnots to write XOR of the inputs on qubit2 qc_ha.cx(0 , 2 ) qc_ha.cx(1 , 2 ) # use ccx / toffoli gate to write AND of the inputs on qubit3 qc_ha.ccx(0 , 1 , 3 ) qc_ha.barrier() # extract outputs qc_ha.measure(2 , 0 ) # extract XOR value qc_ha.measure(3 , 1 ) # extract AND value # Execute the circuit on the qasm simulator UpperCAmelCase__ : Optional[int] = qiskit.execute(snake_case , snake_case , shots=1000 ) # Return the histogram data of the results of the experiment return job.result().get_counts(snake_case ) if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = half_adder(1, 1) print(F"""Half Adder Output Qubit Counts: {counts}""")
298
0
"""simple docstring""" import gc import random import unittest import numpy as np import torch from transformers import ( CLIPImageProcessor, CLIPTextConfig, CLIPTextModel, CLIPTokenizer, CLIPVisionConfig, CLIPVisionModelWithProjection, ) from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel from diffusers.pipelines.pipeline_utils import DiffusionPipeline from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import ( enable_full_determinism, floats_tensor, load_image, load_numpy, require_torch_gpu, skip_mps, slow, torch_device, ) from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS from ..test_pipelines_common import ( PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, assert_mean_pixel_difference, ) enable_full_determinism() class lowerCAmelCase__ ( _a , _a , _a , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =StableUnCLIPImgaImgPipeline SCREAMING_SNAKE_CASE_ =TEXT_GUIDED_IMAGE_VARIATION_PARAMS SCREAMING_SNAKE_CASE_ =TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS SCREAMING_SNAKE_CASE_ =frozenset( [] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess SCREAMING_SNAKE_CASE_ =frozenset([] ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = 3_2 UpperCAmelCase__ : int = embedder_hidden_size # image encoding components UpperCAmelCase__ : Tuple = CLIPImageProcessor(crop_size=3_2 , size=3_2 ) torch.manual_seed(0 ) UpperCAmelCase__ : List[Any] = CLIPVisionModelWithProjection( CLIPVisionConfig( hidden_size=snake_case_ , projection_dim=snake_case_ , num_hidden_layers=5 , num_attention_heads=4 , image_size=3_2 , intermediate_size=3_7 , patch_size=1 , ) ) # regular denoising components torch.manual_seed(0 ) UpperCAmelCase__ : Any = StableUnCLIPImageNormalizer(embedding_dim=snake_case_ ) UpperCAmelCase__ : Optional[Any] = DDPMScheduler(beta_schedule="squaredcos_cap_v2" ) torch.manual_seed(0 ) UpperCAmelCase__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" ) torch.manual_seed(0 ) UpperCAmelCase__ : List[str] = CLIPTextModel( CLIPTextConfig( bos_token_id=0 , eos_token_id=2 , hidden_size=snake_case_ , projection_dim=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , ) ) torch.manual_seed(0 ) UpperCAmelCase__ : Dict = UNetaDConditionModel( sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(3_2, 6_4) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=snake_case_ , layers_per_block=1 , upcast_attention=snake_case_ , use_linear_projection=snake_case_ , ) torch.manual_seed(0 ) UpperCAmelCase__ : Optional[int] = DDIMScheduler( beta_schedule="scaled_linear" , beta_start=0.0_0085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=snake_case_ , steps_offset=1 , ) torch.manual_seed(0 ) UpperCAmelCase__ : List[str] = AutoencoderKL() UpperCAmelCase__ : List[Any] = { # image encoding components """feature_extractor""": feature_extractor, """image_encoder""": image_encoder.eval(), # image 
noising components """image_normalizer""": image_normalizer.eval(), """image_noising_scheduler""": image_noising_scheduler, # regular denoising components """tokenizer""": tokenizer, """text_encoder""": text_encoder.eval(), """unet""": unet.eval(), """scheduler""": scheduler, """vae""": vae.eval(), } return components def __a ( self : str , snake_case__ : List[Any] , snake_case__ : Optional[Any]=0 , snake_case__ : str=True ): '''simple docstring''' if str(snake_case_ ).startswith("mps" ): UpperCAmelCase__ : int = torch.manual_seed(snake_case_ ) else: UpperCAmelCase__ : Tuple = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ ) UpperCAmelCase__ : Dict = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(snake_case_ ) ).to(snake_case_ ) if pil_image: UpperCAmelCase__ : Tuple = input_image * 0.5 + 0.5 UpperCAmelCase__ : Tuple = input_image.clamp(0 , 1 ) UpperCAmelCase__ : Tuple = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy() UpperCAmelCase__ : Any = DiffusionPipeline.numpy_to_pil(snake_case_ )[0] return { "prompt": "An anime racoon running a marathon", "image": input_image, "generator": generator, "num_inference_steps": 2, "output_type": "np", } @skip_mps def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : List[Any] = """cpu""" # ensure determinism for the device-dependent torch.Generator UpperCAmelCase__ : Any = self.get_dummy_components() UpperCAmelCase__ : int = StableUnCLIPImgaImgPipeline(**snake_case_ ) UpperCAmelCase__ : Any = sd_pipe.to(snake_case_ ) sd_pipe.set_progress_bar_config(disable=snake_case_ ) UpperCAmelCase__ : Union[str, Any] = self.get_dummy_inputs(snake_case_ ) inputs.update({"image_embeds": None} ) UpperCAmelCase__ : Dict = sd_pipe(**snake_case_ ).images UpperCAmelCase__ : str = image[0, -3:, -3:, -1] assert image.shape == (1, 3_2, 3_2, 3) UpperCAmelCase__ : Union[str, Any] = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] ) assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3 def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Tuple = torch_device in ["""cpu""", """mps"""] self._test_attention_slicing_forward_pass(test_max_difference=snake_case_ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = torch_device in ["""cpu""", """mps"""] self._test_inference_batch_single_identical(test_max_difference=snake_case_ ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __a ( self : List[Any] ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(test_max_difference=snake_case_ ) @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : List[Any] ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) UpperCAmelCase__ : Optional[int] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" ) UpperCAmelCase__ : Dict = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa ) pipe.to(snake_case_ ) pipe.set_progress_bar_config(disable=snake_case_ ) # stable unclip 
will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() UpperCAmelCase__ : Any = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : str = pipe(snake_case_ , "anime turle" , generator=snake_case_ , output_type="np" ) UpperCAmelCase__ : Union[str, Any] = output.images[0] assert image.shape == (7_6_8, 7_6_8, 3) assert_mean_pixel_difference(snake_case_ , snake_case_ ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) UpperCAmelCase__ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" ) UpperCAmelCase__ : List[Any] = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) pipe.to(snake_case_ ) pipe.set_progress_bar_config(disable=snake_case_ ) # stable unclip will oom when integration tests are run on a V100, # so turn on memory savings pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() UpperCAmelCase__ : Dict = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : str = pipe(snake_case_ , "anime turle" , generator=snake_case_ , output_type="np" ) UpperCAmelCase__ : int = output.images[0] assert image.shape == (7_6_8, 7_6_8, 3) assert_mean_pixel_difference(snake_case_ , snake_case_ ) def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Tuple = load_image( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" ) torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats() UpperCAmelCase__ : List[Any] = StableUnCLIPImgaImgPipeline.from_pretrained( "fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa ) UpperCAmelCase__ : List[Any] = pipe.to(snake_case_ ) pipe.set_progress_bar_config(disable=snake_case_ ) pipe.enable_attention_slicing() pipe.enable_sequential_cpu_offload() UpperCAmelCase__ : Any = pipe( snake_case_ , "anime turtle" , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Optional[int] = torch.cuda.max_memory_allocated() # make sure that less than 7 GB is allocated assert mem_bytes < 7 * 1_0**9
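# --- usage sketch (illustrative only) ---
# What the slow tests above exercise, outside the test harness; a minimal
# sketch assuming the canonical diffusers class name
# `StableUnCLIPImg2ImgPipeline` and the same checkpoint and image as the
# tests.
#
#   import torch
#   from diffusers import StableUnCLIPImg2ImgPipeline
#   from diffusers.utils import load_image
#
#   pipe = StableUnCLIPImg2ImgPipeline.from_pretrained(
#       "fusing/stable-unclip-2-1-l-img2img", torch_dtype=torch.float16
#   ).to("cuda")
#   init_image = load_image(
#       "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png"
#   )
#   image = pipe(init_image, "anime turtle", output_type="np").images[0]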
371
"""simple docstring""" from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) _lowerCAmelCase : Union[str, Any] = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''efficientformer''' def __init__( self : List[Any] , snake_case__ : List[int] = [3, 2, 6, 4] , snake_case__ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , snake_case__ : List[bool] = [True, True, True, True] , snake_case__ : int = 4_4_8 , snake_case__ : int = 3_2 , snake_case__ : int = 4 , snake_case__ : int = 7 , snake_case__ : int = 5 , snake_case__ : int = 8 , snake_case__ : int = 4 , snake_case__ : float = 0.0 , snake_case__ : int = 1_6 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : float = 0.0 , snake_case__ : int = 1 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : float = 1e-5 , snake_case__ : str = "gelu" , snake_case__ : float = 0.02 , snake_case__ : float = 1e-12 , snake_case__ : int = 2_2_4 , snake_case__ : float = 1e-05 , **snake_case__ : str , ): '''simple docstring''' super().__init__(**snake_case__ ) UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : List[str] = hidden_sizes UpperCAmelCase__ : Union[str, Any] = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : List[Any] = initializer_range UpperCAmelCase__ : List[Any] = layer_norm_eps UpperCAmelCase__ : Optional[int] = patch_size UpperCAmelCase__ : Tuple = num_channels UpperCAmelCase__ : Optional[int] = depths UpperCAmelCase__ : Union[str, Any] = mlp_expansion_ratio UpperCAmelCase__ : Dict = downsamples UpperCAmelCase__ : Any = dim UpperCAmelCase__ : str = key_dim UpperCAmelCase__ : List[Any] = attention_ratio UpperCAmelCase__ : Optional[Any] = resolution UpperCAmelCase__ : Optional[Any] = pool_size UpperCAmelCase__ : Any = downsample_patch_size UpperCAmelCase__ : int = downsample_stride UpperCAmelCase__ : Dict = downsample_pad UpperCAmelCase__ : List[Any] = drop_path_rate UpperCAmelCase__ : Optional[Any] = num_metaad_blocks UpperCAmelCase__ : List[str] = distillation UpperCAmelCase__ : Dict = use_layer_scale UpperCAmelCase__ : List[Any] = layer_scale_init_value UpperCAmelCase__ : Optional[Any] = image_size UpperCAmelCase__ : Optional[int] = batch_norm_eps
298
0
"""simple docstring""" import collections import os from typing import List, Optional, Tuple from transformers.utils import is_jieba_available, requires_backends if is_jieba_available(): import jieba from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCAmelCase : Any = logging.get_logger(__name__) _lowerCAmelCase : str = {"""vocab_file""": """vocab.txt"""} _lowerCAmelCase : str = { """vocab_file""": { """openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""", }, } _lowerCAmelCase : List[str] = { """openbmb/cpm-ant-10b""": 1_024, } def SCREAMING_SNAKE_CASE__ ( snake_case )-> str: '''simple docstring''' UpperCAmelCase__ : str = collections.OrderedDict() with open(__lowerCAmelCase , "r" , encoding="utf-8" ) as reader: UpperCAmelCase__ : List[Any] = reader.readlines() for index, token in enumerate(__lowerCAmelCase ): UpperCAmelCase__ : str = token.rstrip("\n" ) UpperCAmelCase__ : int = index return vocab class lowerCAmelCase__ ( __lowerCamelCase ): def __init__( self : str , snake_case__ : str , snake_case__ : int="<unk>" , snake_case__ : Tuple=2_0_0 ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = vocab UpperCAmelCase__ : str = unk_token UpperCAmelCase__ : Dict = max_input_chars_per_word def __a ( self : Tuple , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = list(__lowercase ) if len(__lowercase ) > self.max_input_chars_per_word: return [self.unk_token] UpperCAmelCase__ : List[Any] = 0 UpperCAmelCase__ : List[str] = [] while start < len(__lowercase ): UpperCAmelCase__ : Any = len(__lowercase ) UpperCAmelCase__ : Any = None while start < end: UpperCAmelCase__ : Tuple = ''''''.join(chars[start:end] ) if substr in self.vocab: UpperCAmelCase__ : Union[str, Any] = substr break end -= 1 if cur_substr is None: sub_tokens.append(self.unk_token ) start += 1 else: sub_tokens.append(__lowercase ) UpperCAmelCase__ : Union[str, Any] = end return sub_tokens class lowerCAmelCase__ ( __lowerCamelCase ): SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ =["""input_ids""", """attention_mask"""] SCREAMING_SNAKE_CASE_ =False def __init__( self : str , snake_case__ : Optional[Any] , snake_case__ : Dict="<d>" , snake_case__ : List[Any]="</d>" , snake_case__ : Union[str, Any]="<s>" , snake_case__ : List[str]="</s>" , snake_case__ : str="<pad>" , snake_case__ : Tuple="<unk>" , snake_case__ : Tuple="</n>" , snake_case__ : List[Any]="</_>" , snake_case__ : str="left" , **snake_case__ : Optional[Any] , ): '''simple docstring''' requires_backends(self , ["jieba"] ) super().__init__( bod_token=__lowercase , eod_token=__lowercase , bos_token=__lowercase , eos_token=__lowercase , pad_token=__lowercase , unk_token=__lowercase , line_token=__lowercase , space_token=__lowercase , padding_side=__lowercase , **__lowercase , ) UpperCAmelCase__ : List[str] = bod_token UpperCAmelCase__ : List[Any] = eod_token UpperCAmelCase__ : List[Any] = load_vocab(__lowercase ) UpperCAmelCase__ : Any = self.encoder[space_token] UpperCAmelCase__ : Dict = self.encoder[line_token] del self.encoder[space_token] del self.encoder[line_token] UpperCAmelCase__ : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) ) UpperCAmelCase__ : Any = {v: k for k, v in self.encoder.items()} UpperCAmelCase__ : Any = WordpieceTokenizer(vocab=self.encoder , 
unk_token=self.unk_token ) @property def __a ( self : Optional[int] ): '''simple docstring''' return self.encoder[self.bod_token] @property def __a ( self : Union[str, Any] ): '''simple docstring''' return self.encoder[self.eod_token] @property def __a ( self : List[str] ): '''simple docstring''' return self.encoder["\n"] @property def __a ( self : Tuple ): '''simple docstring''' return len(self.encoder ) def __a ( self : Any ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def __a ( self : str , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Tuple = [] for x in jieba.cut(__lowercase , cut_all=__lowercase ): output_tokens.extend(self.wordpiece_tokenizer.tokenize(__lowercase ) ) return output_tokens def __a ( self : Optional[Any] , snake_case__ : Optional[Any] , **snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = [i for i in token_ids if i >= 0] UpperCAmelCase__ : Optional[int] = [ x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id ] return super()._decode(__lowercase , **__lowercase ) def __a ( self : int , snake_case__ : List[str] ): '''simple docstring''' return token in self.encoder def __a ( self : int , snake_case__ : List[str] ): '''simple docstring''' return "".join(__lowercase ) def __a ( self : Optional[int] , snake_case__ : Optional[int] ): '''simple docstring''' return self.encoder.get(__lowercase , self.encoder.get(self.unk_token ) ) def __a ( self : Tuple , snake_case__ : int ): '''simple docstring''' return self.decoder.get(__lowercase , self.unk_token ) def __a ( self : Optional[Any] , snake_case__ : str , snake_case__ : Optional[str] = None ): '''simple docstring''' if os.path.isdir(__lowercase ): UpperCAmelCase__ : int = os.path.join( __lowercase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) else: UpperCAmelCase__ : str = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory UpperCAmelCase__ : List[str] = 0 if " " in self.encoder: UpperCAmelCase__ : Dict = self.encoder[''' '''] del self.encoder[" "] if "\n" in self.encoder: UpperCAmelCase__ : Union[str, Any] = self.encoder['''\n'''] del self.encoder["\n"] UpperCAmelCase__ : Dict = collections.OrderedDict(sorted(self.encoder.items() , key=lambda snake_case__ : x[1] ) ) with open(__lowercase , "w" , encoding="utf-8" ) as writer: for token, token_index in self.encoder.items(): if index != token_index: logger.warning( f'Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.' " Please check that the vocabulary is not corrupted!" ) UpperCAmelCase__ : str = token_index writer.write(token + "\n" ) index += 1 return (vocab_file,) def __a ( self : Tuple , snake_case__ : List[int] , snake_case__ : List[int] = None ): '''simple docstring''' if token_ids_a is None: return [self.bos_token_id] + token_ids_a return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a def __a ( self : int , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=__lowercase , token_ids_a=__lowercase , already_has_special_tokens=__lowercase ) if token_ids_a is not None: return [1] + ([0] * len(__lowercase )) + [1] + ([0] * len(__lowercase )) return [1] + ([0] * len(__lowercase ))
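# --- usage sketch (illustrative only) ---
# Loading the tokenizer from the checkpoint referenced above; a minimal
# sketch assuming the canonical transformers class name `CpmAntTokenizer`
# and that the `jieba` package is installed for the pre-tokenization step.
#
#   from transformers import CpmAntTokenizer
#
#   tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
#   ids = tokenizer.encode("今天天气真好")
#   print(tokenizer.decode(ids))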
350
"""simple docstring""" import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def SCREAMING_SNAKE_CASE__ ( snake_case : Dataset , snake_case : Dict[str, str] )-> Any: '''simple docstring''' UpperCAmelCase__ : str = args.log_outputs UpperCAmelCase__ : str = "_".join(args.dataset.split("/" ) + [args.config, args.split] ) # load metric UpperCAmelCase__ : List[str] = load_metric("wer" ) UpperCAmelCase__ : Tuple = load_metric("cer" ) # compute metrics UpperCAmelCase__ : List[str] = wer.compute(references=result["target"] , predictions=result["prediction"] ) UpperCAmelCase__ : Tuple = cer.compute(references=result["target"] , predictions=result["prediction"] ) # print & log results UpperCAmelCase__ : Union[str, Any] = f'WER: {wer_result}\nCER: {cer_result}' print(snake_case ) with open(f'{dataset_id}_eval_results.txt' , "w" ) as f: f.write(snake_case ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: UpperCAmelCase__ : str = f'log_{dataset_id}_predictions.txt' UpperCAmelCase__ : List[str] = f'log_{dataset_id}_targets.txt' with open(snake_case , "w" ) as p, open(snake_case , "w" ) as t: # mapping function to write output def write_to_file(snake_case : List[Any] , snake_case : List[str] ): p.write(f'{i}' + "\n" ) p.write(batch["prediction"] + "\n" ) t.write(f'{i}' + "\n" ) t.write(batch["target"] + "\n" ) result.map(snake_case , with_indices=snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> str: '''simple docstring''' UpperCAmelCase__ : str = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training UpperCAmelCase__ : str = re.sub(snake_case , "" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
UpperCAmelCase__ : Tuple = ["\n\n", "\n", " ", " "] for t in token_sequences_to_ignore: UpperCAmelCase__ : List[Any] = " ".join(text.split(snake_case ) ) return text def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor UpperCAmelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(args.model_id ) UpperCAmelCase__ : str = feature_extractor.sampling_rate # resample audio UpperCAmelCase__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case ) ) # load eval pipeline if args.device is None: UpperCAmelCase__ : List[str] = 0 if torch.cuda.is_available() else -1 UpperCAmelCase__ : Optional[int] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(snake_case : Any ): UpperCAmelCase__ : List[str] = asr( batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) UpperCAmelCase__ : List[Any] = prediction["text"] UpperCAmelCase__ : Optional[int] = normalize_text(batch["sentence"] ) return batch # run inference on all examples UpperCAmelCase__ : Dict = dataset.map(snake_case , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case , snake_case ) if __name__ == "__main__": _lowerCAmelCase : Any = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) _lowerCAmelCase : Tuple = parser.parse_args() main(args)
298
0
"""simple docstring""" from dataclasses import dataclass from typing import List, Optional, Union import numpy as np import PIL import torch from transformers import CLIPImageProcessor, CLIPVisionModel from ...models import PriorTransformer from ...pipelines import DiffusionPipeline from ...schedulers import HeunDiscreteScheduler from ...utils import ( BaseOutput, is_accelerate_available, logging, randn_tensor, replace_example_docstring, ) from .renderer import ShapERenderer _lowerCAmelCase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name _lowerCAmelCase : str = """ Examples: ```py >>> from PIL import Image >>> import torch >>> from diffusers import DiffusionPipeline >>> from diffusers.utils import export_to_gif, load_image >>> device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") >>> repo = \"openai/shap-e-img2img\" >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16) >>> pipe = pipe.to(device) >>> guidance_scale = 3.0 >>> image_url = \"https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png\" >>> image = load_image(image_url).convert(\"RGB\") >>> images = pipe( ... image, ... guidance_scale=guidance_scale, ... num_inference_steps=64, ... frame_size=256, ... ).images >>> gif_path = export_to_gif(images[0], \"corgi_3d.gif\") ``` """ @dataclass class lowerCAmelCase__ ( __UpperCamelCase ): SCREAMING_SNAKE_CASE_ =42 class lowerCAmelCase__ ( __UpperCamelCase ): def __init__( self : Tuple , snake_case__ : PriorTransformer , snake_case__ : CLIPVisionModel , snake_case__ : CLIPImageProcessor , snake_case__ : HeunDiscreteScheduler , snake_case__ : ShapERenderer , ): '''simple docstring''' super().__init__() self.register_modules( prior=snake_case__ , image_encoder=snake_case__ , image_processor=snake_case__ , scheduler=snake_case__ , renderer=snake_case__ , ) def __a ( self : Optional[int] , snake_case__ : Dict , snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : Any ): '''simple docstring''' if latents is None: UpperCAmelCase__ : Tuple = randn_tensor(snake_case__ , generator=snake_case__ , device=snake_case__ , dtype=snake_case__ ) else: if latents.shape != shape: raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' ) UpperCAmelCase__ : Dict = latents.to(snake_case__ ) UpperCAmelCase__ : Tuple = latents * scheduler.init_noise_sigma return latents def __a ( self : Tuple , snake_case__ : List[Any]=0 ): '''simple docstring''' if is_accelerate_available(): from accelerate import cpu_offload else: raise ImportError("Please install accelerate via `pip install accelerate`" ) UpperCAmelCase__ : List[Any] = torch.device(f'cuda:{gpu_id}' ) UpperCAmelCase__ : Optional[int] = [self.image_encoder, self.prior] for cpu_offloaded_model in models: if cpu_offloaded_model is not None: cpu_offload(snake_case__ , snake_case__ ) @property def __a ( self : int ): '''simple docstring''' if self.device != torch.device("meta" ) or not hasattr(self.image_encoder , "_hf_hook" ): return self.device for module in self.image_encoder.modules(): if ( hasattr(snake_case__ , "_hf_hook" ) and hasattr(module._hf_hook , "execution_device" ) and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device ) return self.device def __a ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : int , snake_case__ : Any , snake_case__ : Optional[int] , ): '''simple docstring''' if isinstance(snake_case__ , 
snake_case__ ) and isinstance(image[0] , torch.Tensor ): UpperCAmelCase__ : Any = torch.cat(snake_case__ , axis=0 ) if image[0].ndim == 4 else torch.stack(snake_case__ , axis=0 ) if not isinstance(snake_case__ , torch.Tensor ): UpperCAmelCase__ : Tuple = self.image_processor(snake_case__ , return_tensors="pt" ).pixel_values[0].unsqueeze(0 ) UpperCAmelCase__ : Dict = image.to(dtype=self.image_encoder.dtype , device=snake_case__ ) UpperCAmelCase__ : List[str] = self.image_encoder(snake_case__ )['last_hidden_state'] UpperCAmelCase__ : Any = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256 UpperCAmelCase__ : Optional[int] = image_embeds.repeat_interleave(snake_case__ , dim=0 ) if do_classifier_free_guidance: UpperCAmelCase__ : Optional[int] = torch.zeros_like(snake_case__ ) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes UpperCAmelCase__ : Optional[int] = torch.cat([negative_image_embeds, image_embeds] ) return image_embeds @torch.no_grad() @replace_example_docstring(snake_case__ ) def __call__( self : Dict , snake_case__ : Union[PIL.Image.Image, List[PIL.Image.Image]] , snake_case__ : int = 1 , snake_case__ : int = 2_5 , snake_case__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , snake_case__ : Optional[torch.FloatTensor] = None , snake_case__ : float = 4.0 , snake_case__ : int = 6_4 , snake_case__ : Optional[str] = "pil" , snake_case__ : bool = True , ): '''simple docstring''' if isinstance(snake_case__ , PIL.Image.Image ): UpperCAmelCase__ : Union[str, Any] = 1 elif isinstance(snake_case__ , torch.Tensor ): UpperCAmelCase__ : Any = image.shape[0] elif isinstance(snake_case__ , snake_case__ ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ): UpperCAmelCase__ : Any = len(snake_case__ ) else: raise ValueError( f'`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(snake_case__ )}' ) UpperCAmelCase__ : Union[str, Any] = self._execution_device UpperCAmelCase__ : Optional[Any] = batch_size * num_images_per_prompt UpperCAmelCase__ : List[Any] = guidance_scale > 1.0 UpperCAmelCase__ : Tuple = self._encode_image(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) # prior self.scheduler.set_timesteps(snake_case__ , device=snake_case__ ) UpperCAmelCase__ : Optional[Any] = self.scheduler.timesteps UpperCAmelCase__ : List[str] = self.prior.config.num_embeddings UpperCAmelCase__ : int = self.prior.config.embedding_dim UpperCAmelCase__ : List[Any] = self.prepare_latents( (batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , snake_case__ , snake_case__ , snake_case__ , self.scheduler , ) # YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim UpperCAmelCase__ : List[Any] = latents.reshape(latents.shape[0] , snake_case__ , snake_case__ ) for i, t in enumerate(self.progress_bar(snake_case__ ) ): # expand the latents if we are doing classifier free guidance UpperCAmelCase__ : int = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents UpperCAmelCase__ : str = self.scheduler.scale_model_input(snake_case__ , snake_case__ ) UpperCAmelCase__ : List[str] = self.prior( snake_case__ , timestep=snake_case__ , proj_embedding=snake_case__ , ).predicted_image_embedding # remove the variance UpperCAmelCase__ : Tuple = noise_pred.split( 
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim if do_classifier_free_guidance is not None: UpperCAmelCase__ : Any = noise_pred.chunk(2 ) UpperCAmelCase__ : Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond) UpperCAmelCase__ : List[Any] = self.scheduler.step( snake_case__ , timestep=snake_case__ , sample=snake_case__ , ).prev_sample if output_type == "latent": return ShapEPipelineOutput(images=snake_case__ ) UpperCAmelCase__ : List[str] = [] for i, latent in enumerate(snake_case__ ): print() UpperCAmelCase__ : Dict = self.renderer.decode( latent[None, :] , snake_case__ , size=snake_case__ , ray_batch_size=4_0_9_6 , n_coarse_samples=6_4 , n_fine_samples=1_2_8 , ) images.append(snake_case__ ) UpperCAmelCase__ : List[str] = torch.stack(snake_case__ ) if output_type not in ["np", "pil"]: raise ValueError(f'Only the output types `pil` and `np` are supported not output_type={output_type}' ) UpperCAmelCase__ : Tuple = images.cpu().numpy() if output_type == "pil": UpperCAmelCase__ : Optional[int] = [self.numpy_to_pil(snake_case__ ) for image in images] # Offload last model to CPU if hasattr(self , "final_offload_hook" ) and self.final_offload_hook is not None: self.final_offload_hook.offload() if not return_dict: return (images,) return ShapEPipelineOutput(images=snake_case__ )
351
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowerCAmelCase__ : def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str=1_0_0 , snake_case__ : str=1_3 , snake_case__ : Optional[int]=3_0 , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]=True , snake_case__ : Any=3_2 , snake_case__ : List[str]=4 , snake_case__ : Any=4 , snake_case__ : Dict=3_7 , snake_case__ : str="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[Any]=1_0 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : Tuple=None , snake_case__ : Tuple=[0, 1, 2, 3] , ): '''simple docstring''' UpperCAmelCase__ : int = parent UpperCAmelCase__ : List[str] = 1_0_0 UpperCAmelCase__ : List[Any] = batch_size UpperCAmelCase__ : int = image_size UpperCAmelCase__ : List[Any] = patch_size UpperCAmelCase__ : List[Any] = num_channels UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : str = use_labels UpperCAmelCase__ : Any = hidden_size UpperCAmelCase__ : Dict = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Any = hidden_act UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : str = attention_probs_dropout_prob UpperCAmelCase__ : Optional[int] = type_sequence_label_size UpperCAmelCase__ : Any = initializer_range UpperCAmelCase__ : Any = scope UpperCAmelCase__ : Optional[Any] = out_indices UpperCAmelCase__ : int = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase__ : List[Any] = (image_size // patch_size) ** 2 UpperCAmelCase__ : Optional[int] = num_patches + 1 def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : str = None UpperCAmelCase__ : Optional[int] = None if self.use_labels: UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) UpperCAmelCase__ : Tuple = self.get_config() return config, pixel_values, labels, pixel_labels def __a ( self : int ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads 
, intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def __a ( self : int , snake_case__ : str , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Any ): '''simple docstring''' UpperCAmelCase__ : int = BeitForMaskedImageModeling(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[Any] = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def __a ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase__ : Any = 1 UpperCAmelCase__ : List[Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase__ : Optional[Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.num_labels UpperCAmelCase__ : int = BeitForSemanticSegmentation(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = model(snake_case__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = config_and_inputs UpperCAmelCase__ : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( { '''feature-extraction''': BeitModel, '''image-classification''': BeitForImageClassification, '''image-segmentation''': BeitForSemanticSegmentation, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False 
SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitModelTester(self ) UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 ) def __a ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def __a ( self : List[Any] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def __a ( self : List[str] ): '''simple docstring''' pass def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Dict = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase__ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(snake_case__ ) UpperCAmelCase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : str = [*signature.parameters.keys()] UpperCAmelCase__ : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' if not self.model_tester.is_training: return UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Optional[int] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]: continue UpperCAmelCase__ : Optional[Any] = model_class(snake_case__ ) model.to(snake_case__ ) model.train() UpperCAmelCase__ : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Tuple = model(**snake_case__ ).loss loss.backward() def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : List[str] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(snake_case__ ), 
BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue UpperCAmelCase__ : List[Any] = model_class(snake_case__ ) model.gradient_checkpointing_enable() model.to(snake_case__ ) model.train() UpperCAmelCase__ : Dict = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Optional[Any] = model(**snake_case__ ).loss loss.backward() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Union[str, Any] = _config_zero_init(snake_case__ ) for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(config=snake_case__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) @slow def __a ( self : Any ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Optional[Any] = BeitModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def __a ( self : Union[str, Any] ): '''simple docstring''' return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(snake_case__ ) UpperCAmelCase__ : int = self.default_image_processor UpperCAmelCase__ : List[Any] = prepare_img() UpperCAmelCase__ : Dict = image_processor(images=snake_case__ , return_tensors="pt" ).pixel_values.to(snake_case__ ) # prepare bool_masked_pos UpperCAmelCase__ : Union[str, Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ ) UpperCAmelCase__ : str = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 1_9_6, 8_1_9_2) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Any = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) ) @slow def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Dict = prepare_img() UpperCAmelCase__ : Tuple = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Union[str, Any] = model(**snake_case__ ) UpperCAmelCase__ : Any = outputs.logits # verify the logits UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1_0_0_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Optional[Any] 
= torch.tensor([-1.2385, -1.0987, -1.0108] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : List[str] = 2_8_1 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Any = prepare_img() UpperCAmelCase__ : Union[str, Any] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[Any] = model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 2_1_8_4_1) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : int = torch.tensor([1.6881, -0.2787, 0.5901] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : Any = 2_3_9_6 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : List[Any] = model.to(snake_case__ ) UpperCAmelCase__ : int = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Any = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : List[Any] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : str = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[str] = model(**snake_case__ ) UpperCAmelCase__ : Dict = outputs.logits # verify the logits UpperCAmelCase__ : Any = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : List[str] = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: UpperCAmelCase__ : Optional[Any] = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=snake_case__ , ) else: UpperCAmelCase__ : int = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=snake_case__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 ) ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : Any = model.to(snake_case__ ) UpperCAmelCase__ : Dict = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : Optional[int] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : Optional[int] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] 
= model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits.detach().cpu() UpperCAmelCase__ : str = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(5_0_0, 3_0_0)] ) UpperCAmelCase__ : List[Any] = torch.Size((5_0_0, 3_0_0) ) self.assertEqual(segmentation[0].shape , snake_case__ ) UpperCAmelCase__ : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ ) UpperCAmelCase__ : int = torch.Size((1_6_0, 1_6_0) ) self.assertEqual(segmentation[0].shape , snake_case__ )
298
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__) _lowerCAmelCase : Dict = { """google/canine-s""": """https://huggingface.co/google/canine-s/resolve/main/config.json""", # See all CANINE models at https://huggingface.co/models?filter=canine } class lowerCAmelCase__ ( lowerCAmelCase_ ): SCREAMING_SNAKE_CASE_ ="""canine""" def __init__( self : List[Any] , snake_case__ : Tuple=7_6_8 , snake_case__ : Tuple=1_2 , snake_case__ : Optional[Any]=1_2 , snake_case__ : Tuple=3_0_7_2 , snake_case__ : Optional[Any]="gelu" , snake_case__ : Optional[Any]=0.1 , snake_case__ : str=0.1 , snake_case__ : str=1_6_3_8_4 , snake_case__ : int=1_6 , snake_case__ : Dict=0.02 , snake_case__ : Optional[Any]=1e-12 , snake_case__ : Any=0 , snake_case__ : Any=0xe_000 , snake_case__ : Tuple=0xe_001 , snake_case__ : int=4 , snake_case__ : List[Any]=4 , snake_case__ : Optional[Any]=8 , snake_case__ : List[Any]=1_6_3_8_4 , snake_case__ : Optional[int]=1_2_8 , **snake_case__ : str , ): '''simple docstring''' super().__init__(pad_token_id=__SCREAMING_SNAKE_CASE , bos_token_id=__SCREAMING_SNAKE_CASE , eos_token_id=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) UpperCAmelCase__ : int = max_position_embeddings UpperCAmelCase__ : List[Any] = hidden_size UpperCAmelCase__ : Union[str, Any] = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : Any = intermediate_size UpperCAmelCase__ : List[str] = hidden_act UpperCAmelCase__ : Union[str, Any] = hidden_dropout_prob UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob UpperCAmelCase__ : str = initializer_range UpperCAmelCase__ : str = type_vocab_size UpperCAmelCase__ : Union[str, Any] = layer_norm_eps # Character config: UpperCAmelCase__ : List[Any] = downsampling_rate UpperCAmelCase__ : Dict = upsampling_kernel_size UpperCAmelCase__ : Optional[int] = num_hash_functions UpperCAmelCase__ : Tuple = num_hash_buckets UpperCAmelCase__ : str = local_transformer_stride
352
"""simple docstring""" import functools def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : str )-> int: '''simple docstring''' UpperCAmelCase__ : List[str] = len(snake_case ) UpperCAmelCase__ : str = len(snake_case ) @functools.cache def min_distance(snake_case : int , snake_case : int ) -> int: # if first word index is overflow - delete all from the second word if indexa >= len_worda: return len_worda - indexa # if second word index is overflow - delete all from the first word if indexa >= len_worda: return len_worda - indexa UpperCAmelCase__ : Optional[int] = int(worda[indexa] != worda[indexa] ) # current letters not identical return min( 1 + min_distance(indexa + 1 , snake_case ) , 1 + min_distance(snake_case , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , ) return min_distance(0 , 0 ) if __name__ == "__main__": import doctest doctest.testmod()
298
0
"""simple docstring""" import argparse import torch from datasets import load_dataset from donut import DonutModel from transformers import ( DonutImageProcessor, DonutProcessor, DonutSwinConfig, DonutSwinModel, MBartConfig, MBartForCausalLM, VisionEncoderDecoderModel, XLMRobertaTokenizerFast, ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] )-> str: '''simple docstring''' UpperCAmelCase__ : int = model.config UpperCAmelCase__ : Optional[int] = DonutSwinConfig( image_size=original_config.input_size , patch_size=4 , depths=original_config.encoder_layer , num_heads=[4, 8, 16, 32] , window_size=original_config.window_size , embed_dim=128 , ) UpperCAmelCase__ : Dict = MBartConfig( is_decoder=lowerCamelCase__ , is_encoder_decoder=lowerCamelCase__ , add_cross_attention=lowerCamelCase__ , decoder_layers=original_config.decoder_layer , max_position_embeddings=original_config.max_position_embeddings , vocab_size=len( model.decoder.tokenizer ) , scale_embedding=lowerCamelCase__ , add_final_layer_norm=lowerCamelCase__ , ) return encoder_config, decoder_config def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] )-> Tuple: '''simple docstring''' if "encoder.model" in name: UpperCAmelCase__ : List[Any] = name.replace("encoder.model" , "encoder" ) if "decoder.model" in name: UpperCAmelCase__ : List[Any] = name.replace("decoder.model" , "decoder" ) if "patch_embed.proj" in name: UpperCAmelCase__ : Any = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" ) if "patch_embed.norm" in name: UpperCAmelCase__ : Dict = name.replace("patch_embed.norm" , "embeddings.norm" ) if name.startswith("encoder" ): if "layers" in name: UpperCAmelCase__ : List[str] = "encoder." + name if "attn.proj" in name: UpperCAmelCase__ : Tuple = name.replace("attn.proj" , "attention.output.dense" ) if "attn" in name and "mask" not in name: UpperCAmelCase__ : List[Any] = name.replace("attn" , "attention.self" ) if "norm1" in name: UpperCAmelCase__ : Union[str, Any] = name.replace("norm1" , "layernorm_before" ) if "norm2" in name: UpperCAmelCase__ : str = name.replace("norm2" , "layernorm_after" ) if "mlp.fc1" in name: UpperCAmelCase__ : Any = name.replace("mlp.fc1" , "intermediate.dense" ) if "mlp.fc2" in name: UpperCAmelCase__ : List[Any] = name.replace("mlp.fc2" , "output.dense" ) if name == "encoder.norm.weight": UpperCAmelCase__ : str = "encoder.layernorm.weight" if name == "encoder.norm.bias": UpperCAmelCase__ : str = "encoder.layernorm.bias" return name def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : List[Any] )-> int: '''simple docstring''' for key in orig_state_dict.copy().keys(): UpperCAmelCase__ : int = orig_state_dict.pop(lowerCamelCase__ ) if "qkv" in key: UpperCAmelCase__ : Tuple = key.split("." 
) UpperCAmelCase__ : int = int(key_split[3] ) UpperCAmelCase__ : Tuple = int(key_split[5] ) UpperCAmelCase__ : Union[str, Any] = model.encoder.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size if "weight" in key: UpperCAmelCase__ : Optional[int] = val[:dim, :] UpperCAmelCase__ : Optional[Any] = val[dim : dim * 2, :] UpperCAmelCase__ : str = val[-dim:, :] else: UpperCAmelCase__ : str = val[:dim] UpperCAmelCase__ : str = val[dim : dim * 2] UpperCAmelCase__ : Any = val[-dim:] elif "attn_mask" in key or key in ["encoder.model.norm.weight", "encoder.model.norm.bias"]: # HuggingFace implementation doesn't use attn_mask buffer # and model doesn't use final LayerNorms for the encoder pass else: UpperCAmelCase__ : List[str] = val return orig_state_dict def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple=None , snake_case : Any=False )-> Any: '''simple docstring''' UpperCAmelCase__ : int = DonutModel.from_pretrained(lowerCamelCase__ ).eval() # load HuggingFace model UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = get_configs(lowerCamelCase__ ) UpperCAmelCase__ : str = DonutSwinModel(lowerCamelCase__ ) UpperCAmelCase__ : str = MBartForCausalLM(lowerCamelCase__ ) UpperCAmelCase__ : str = VisionEncoderDecoderModel(encoder=lowerCamelCase__ , decoder=lowerCamelCase__ ) model.eval() UpperCAmelCase__ : int = original_model.state_dict() UpperCAmelCase__ : Tuple = convert_state_dict(lowerCamelCase__ , lowerCamelCase__ ) model.load_state_dict(lowerCamelCase__ ) # verify results on scanned document UpperCAmelCase__ : Optional[int] = load_dataset("hf-internal-testing/example-documents" ) UpperCAmelCase__ : Tuple = dataset["test"][0]["image"].convert("RGB" ) UpperCAmelCase__ : Any = XLMRobertaTokenizerFast.from_pretrained(lowerCamelCase__ , from_slow=lowerCamelCase__ ) UpperCAmelCase__ : Any = DonutImageProcessor( do_align_long_axis=original_model.config.align_long_axis , size=original_model.config.input_size[::-1] ) UpperCAmelCase__ : str = DonutProcessor(lowerCamelCase__ , lowerCamelCase__ ) UpperCAmelCase__ : Tuple = processor(lowerCamelCase__ , return_tensors="pt" ).pixel_values if model_name == "naver-clova-ix/donut-base-finetuned-docvqa": UpperCAmelCase__ : List[Any] = "<s_docvqa><s_question>{user_input}</s_question><s_answer>" UpperCAmelCase__ : Optional[Any] = "When is the coffee break?" 
UpperCAmelCase__ : str = task_prompt.replace("{user_input}" , lowerCamelCase__ ) elif model_name == "naver-clova-ix/donut-base-finetuned-rvlcdip": UpperCAmelCase__ : str = "<s_rvlcdip>" elif model_name in [ "naver-clova-ix/donut-base-finetuned-cord-v1", "naver-clova-ix/donut-base-finetuned-cord-v1-2560", ]: UpperCAmelCase__ : List[str] = "<s_cord>" elif model_name == "naver-clova-ix/donut-base-finetuned-cord-v2": UpperCAmelCase__ : Optional[Any] = "s_cord-v2>" elif model_name == "naver-clova-ix/donut-base-finetuned-zhtrainticket": UpperCAmelCase__ : Any = "<s_zhtrainticket>" elif model_name in ["naver-clova-ix/donut-proto", "naver-clova-ix/donut-base"]: # use a random prompt UpperCAmelCase__ : List[Any] = "hello world" else: raise ValueError("Model name not supported" ) UpperCAmelCase__ : Any = original_model.decoder.tokenizer(lowerCamelCase__ , add_special_tokens=lowerCamelCase__ , return_tensors="pt" )[ "input_ids" ] UpperCAmelCase__ : Union[str, Any] = original_model.encoder.model.patch_embed(lowerCamelCase__ ) UpperCAmelCase__ , UpperCAmelCase__ : str = model.encoder.embeddings(lowerCamelCase__ ) assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) # verify encoder hidden states UpperCAmelCase__ : Optional[Any] = original_model.encoder(lowerCamelCase__ ) UpperCAmelCase__ : List[Any] = model.encoder(lowerCamelCase__ ).last_hidden_state assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-2 ) # verify decoder hidden states UpperCAmelCase__ : Union[str, Any] = original_model(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ).logits UpperCAmelCase__ : int = model(lowerCamelCase__ , decoder_input_ids=lowerCamelCase__ ).logits assert torch.allclose(lowerCamelCase__ , lowerCamelCase__ , atol=1E-3 ) print("Looks ok!" ) if pytorch_dump_folder_path is not None: print(f'Saving model and processor to {pytorch_dump_folder_path}' ) model.save_pretrained(lowerCamelCase__ ) processor.save_pretrained(lowerCamelCase__ ) if push_to_hub: model.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" ) processor.push_to_hub("nielsr/" + model_name.split("/" )[-1] , commit_message="Update model" ) if __name__ == "__main__": _lowerCAmelCase : int = argparse.ArgumentParser() # Required parameters parser.add_argument( """--model_name""", default="""naver-clova-ix/donut-base-finetuned-docvqa""", required=False, type=str, help="""Name of the original model you\'d like to convert.""", ) parser.add_argument( """--pytorch_dump_folder_path""", default=None, required=False, type=str, help="""Path to the output PyTorch model directory.""", ) parser.add_argument( """--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model and processor to the 🤗 hub.""", ) _lowerCAmelCase : Any = parser.parse_args() convert_donut_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
353
"""simple docstring""" import re from pathlib import Path from unittest import TestCase import pytest @pytest.mark.integration class lowerCAmelCase__ ( __magic_name__ ): def __a ( self : List[Any] , snake_case__ : str ): '''simple docstring''' with open(snake_case__ , encoding="utf-8" ) as input_file: UpperCAmelCase__ : List[Any] = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" ) UpperCAmelCase__ : Tuple = input_file.read() UpperCAmelCase__ : Tuple = regexp.search(snake_case__ ) return match def __a ( self : List[str] , snake_case__ : str ): '''simple docstring''' with open(snake_case__ , encoding="utf-8" ) as input_file: UpperCAmelCase__ : Union[str, Any] = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL ) UpperCAmelCase__ : Dict = input_file.read() # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search` UpperCAmelCase__ : int = regexp.finditer(snake_case__ ) UpperCAmelCase__ : Dict = [match for match in matches if match is not None and match.group(1 ) is not None] return matches[0] if matches else None def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = Path("./datasets" ) UpperCAmelCase__ : Any = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_encoding_on_file_open(str(snake_case__ ) ): raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}' ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = Path("./datasets" ) UpperCAmelCase__ : int = list(dataset_paths.absolute().glob("**/*.py" ) ) for dataset in dataset_files: if self._no_print_statements(str(snake_case__ ) ): raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.' )
298
0
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[Any] )-> int: '''simple docstring''' return x if y == 0 else greatest_common_divisor(_lowerCAmelCase , x % y ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] , snake_case : Dict )-> Any: '''simple docstring''' return (x * y) // greatest_common_divisor(_lowerCAmelCase , _lowerCAmelCase ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] = 20 )-> Optional[int]: '''simple docstring''' UpperCAmelCase__ : Tuple = 1 for i in range(1 , n + 1 ): UpperCAmelCase__ : Union[str, Any] = lcm(_lowerCAmelCase , _lowerCAmelCase ) return g if __name__ == "__main__": print(F"""{solution() = }""")
354
"""simple docstring""" import numpy as np import datasets _lowerCAmelCase : Optional[int] = """ Compute the Mahalanobis Distance Mahalonobis distance is the distance between a point and a distribution. And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance. It was introduced by Prof. P. C. Mahalanobis in 1936 and has been used in various statistical applications ever since [source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/] """ _lowerCAmelCase : Tuple = """\ @article{de2000mahalanobis, title={The mahalanobis distance}, author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L}, journal={Chemometrics and intelligent laboratory systems}, volume={50}, number={1}, pages={1--18}, year={2000}, publisher={Elsevier} } """ _lowerCAmelCase : Optional[int] = """ Args: X: List of datapoints to be compared with the `reference_distribution`. reference_distribution: List of datapoints from the reference distribution we want to compare to. Returns: mahalanobis: The Mahalonobis distance for each datapoint in `X`. Examples: >>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\") >>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]]) >>> print(results) {'mahalanobis': array([0.5])} """ @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): def __a ( self : Any ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "X": datasets.Sequence(datasets.Value("float" , id="sequence" ) , id="X" ), } ) , ) def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : Any ): '''simple docstring''' # convert to numpy arrays UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = np.array(snake_case__ ) # Assert that arrays are 2D if len(X.shape ) != 2: raise ValueError("Expected `X` to be a 2D vector" ) if len(reference_distribution.shape ) != 2: raise ValueError("Expected `reference_distribution` to be a 2D vector" ) if reference_distribution.shape[0] < 2: raise ValueError( "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension" ) # Get mahalanobis distance for each prediction UpperCAmelCase__ : Optional[Any] = X - np.mean(snake_case__ ) UpperCAmelCase__ : Tuple = np.cov(reference_distribution.T ) try: UpperCAmelCase__ : str = np.linalg.inv(snake_case__ ) except np.linalg.LinAlgError: UpperCAmelCase__ : Optional[Any] = np.linalg.pinv(snake_case__ ) UpperCAmelCase__ : List[Any] = np.dot(snake_case__ , snake_case__ ) UpperCAmelCase__ : Tuple = np.dot(snake_case__ , X_minus_mu.T ).diagonal() return {"mahalanobis": mahal_dist}
298
0
"""simple docstring""" from __future__ import annotations from math import pi # Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of # Pi and the function _lowerCAmelCase : Union[str, Any] = 1.054571817e-34 # unit of ℏ : J * s _lowerCAmelCase : Optional[Any] = 3e8 # unit of c : m * s^-1 def SCREAMING_SNAKE_CASE__ ( snake_case : float , snake_case : float , snake_case : float )-> dict[str, float]: '''simple docstring''' if (force, area, distance).count(0 ) != 1: raise ValueError("One and only one argument must be 0" ) if force < 0: raise ValueError("Magnitude of force can not be negative" ) if distance < 0: raise ValueError("Distance can not be negative" ) if area < 0: raise ValueError("Area can not be negative" ) if force == 0: UpperCAmelCase__ : Dict = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / ( 240 * (distance) ** 4 ) return {"force": force} elif area == 0: UpperCAmelCase__ : str = (240 * force * (distance) ** 4) / ( REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 ) return {"area": area} elif distance == 0: UpperCAmelCase__ : Optional[Any] = ( (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force) ) ** (1 / 4) return {"distance": distance} raise ValueError("One and only one argument must be 0" ) # Run doctest if __name__ == "__main__": import doctest doctest.testmod()
355
"""simple docstring""" import gc import random import unittest import torch from diffusers import ( IFImgaImgPipeline, IFImgaImgSuperResolutionPipeline, IFInpaintingPipeline, IFInpaintingSuperResolutionPipeline, IFPipeline, IFSuperResolutionPipeline, ) from diffusers.models.attention_processor import AttnAddedKVProcessor from diffusers.utils.import_utils import is_xformers_available from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference from . import IFPipelineTesterMixin @skip_mps class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =IFPipeline SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''} SCREAMING_SNAKE_CASE_ =TEXT_TO_IMAGE_BATCH_PARAMS SCREAMING_SNAKE_CASE_ =PipelineTesterMixin.required_optional_params - {'''latents'''} def __a ( self : Dict ): '''simple docstring''' return self._get_dummy_components() def __a ( self : Any , snake_case__ : Dict , snake_case__ : Optional[Any]=0 ): '''simple docstring''' if str(snake_case__ ).startswith("mps" ): UpperCAmelCase__ : str = torch.manual_seed(snake_case__ ) else: UpperCAmelCase__ : Optional[int] = torch.Generator(device=snake_case__ ).manual_seed(snake_case__ ) UpperCAmelCase__ : Tuple = { "prompt": "A painting of a squirrel eating a burger", "generator": generator, "num_inference_steps": 2, "output_type": "numpy", } return inputs def __a ( self : Tuple ): '''simple docstring''' self._test_save_load_optional_components() @unittest.skipIf(torch_device != "cuda" , reason="float16 requires CUDA" ) def __a ( self : Tuple ): '''simple docstring''' # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder super().test_save_load_floataa(expected_max_diff=1e-1 ) def __a ( self : Dict ): '''simple docstring''' self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 ) def __a ( self : int ): '''simple docstring''' self._test_save_load_local() def __a ( self : Any ): '''simple docstring''' self._test_inference_batch_single_identical( expected_max_diff=1e-2 , ) @unittest.skipIf( torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , ) def __a ( self : Optional[Any] ): '''simple docstring''' self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 ) @slow @require_torch_gpu class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : str ): '''simple docstring''' # clean up the VRAM after each test super().tearDown() gc.collect() torch.cuda.empty_cache() def __a ( self : Tuple ): '''simple docstring''' # if UpperCAmelCase__ : Any = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0" , variant="fp16" , torch_dtype=torch.floataa ) UpperCAmelCase__ : Union[str, Any] = IFSuperResolutionPipeline.from_pretrained( "DeepFloyd/IF-II-L-v1.0" , variant="fp16" , torch_dtype=torch.floataa , text_encoder=snake_case__ , tokenizer=snake_case__ ) # pre compute text embeddings and remove T5 to save memory pipe_a.text_encoder.to("cuda" ) UpperCAmelCase__ , UpperCAmelCase__ : Any = pipe_a.encode_prompt("anime turtle" , device="cuda" ) del pipe_a.tokenizer del pipe_a.text_encoder gc.collect() UpperCAmelCase__ : Tuple = None UpperCAmelCase__ : List[Any] = None pipe_a.enable_model_cpu_offload() 
pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # img2img UpperCAmelCase__ : List[str] = IFImgaImgPipeline(**pipe_a.components ) UpperCAmelCase__ : List[str] = IFImgaImgSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_imgaimg(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) pipe_a.remove_all_hooks() pipe_a.remove_all_hooks() # inpainting UpperCAmelCase__ : List[str] = IFInpaintingPipeline(**pipe_a.components ) UpperCAmelCase__ : List[str] = IFInpaintingSuperResolutionPipeline(**pipe_a.components ) pipe_a.enable_model_cpu_offload() pipe_a.enable_model_cpu_offload() pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() ) self._test_if_inpainting(snake_case__ , snake_case__ , snake_case__ , snake_case__ ) def __a ( self : List[str] , snake_case__ : Tuple , snake_case__ : List[Any] , snake_case__ : List[str] , snake_case__ : List[Any] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : List[str] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Dict = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : List[Any] = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_3 * 1_0**9 UpperCAmelCase__ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : str = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Union[str, Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : Dict = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def __a ( self : Optional[Any] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : List[str] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Tuple = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : str = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : 
Optional[Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 UpperCAmelCase__ : List[str] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Tuple = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Dict = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Optional[Any] = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : Dict = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : str = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def __a ( self : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : int , snake_case__ : Optional[int] ): '''simple docstring''' # pipeline 1 _start_torch_memory_measurement() UpperCAmelCase__ : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(snake_case__ ) UpperCAmelCase__ : Optional[int] = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : int = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , num_inference_steps=2 , generator=snake_case__ , output_type="np" , ) UpperCAmelCase__ : int = output.images[0] assert image.shape == (6_4, 6_4, 3) UpperCAmelCase__ : Union[str, Any] = torch.cuda.max_memory_allocated() assert mem_bytes < 1_0 * 1_0**9 UpperCAmelCase__ : int = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) # pipeline 2 _start_torch_memory_measurement() UpperCAmelCase__ : int = torch.Generator(device="cpu" ).manual_seed(0 ) UpperCAmelCase__ : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : Optional[int] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case__ ) UpperCAmelCase__ : List[Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = pipe_a( prompt_embeds=snake_case__ , negative_prompt_embeds=snake_case__ , image=snake_case__ , mask_image=snake_case__ , original_image=snake_case__ , generator=snake_case__ , num_inference_steps=2 , output_type="np" , ) UpperCAmelCase__ : Tuple = output.images[0] assert image.shape == (2_5_6, 2_5_6, 3) UpperCAmelCase__ : List[str] = torch.cuda.max_memory_allocated() assert mem_bytes < 4 * 1_0**9 UpperCAmelCase__ : List[Any] = load_numpy( "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy" ) assert_mean_pixel_difference(snake_case__ , snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Any: '''simple docstring''' 
torch.cuda.empty_cache() torch.cuda.reset_max_memory_allocated() torch.cuda.reset_peak_memory_stats()
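The module-level helper above resets CUDA's peak-memory counters so each pipeline call can be measured in isolation. The measurement pattern the tests rely on, as a standalone sketch:

import torch

if torch.cuda.is_available():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
    x = torch.randn(1024, 1024, device="cuda")  # stand-in for a pipeline call
    print(f"peak GPU memory: {torch.cuda.max_memory_allocated() / 2**30:.2f} GiB")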
298
0
"""simple docstring""" import argparse import shutil import time from json import JSONDecodeError from logging import getLogger from pathlib import Path from typing import Dict, List import torch from torch.utils.data import DataLoader from tqdm import tqdm from transformers import AutoModelForSeqaSeqLM, AutoTokenizer from utils import ( SeqaSeqDataset, calculate_bleu, calculate_rouge, chunks, lmap, load_json, parse_numeric_n_bool_cl_kwargs, save_json, use_task_specific_params, write_txt_file, ) _lowerCAmelCase : str = getLogger(__name__) def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : str , snake_case : str , snake_case : int = 8 , snake_case : int = 1024 , snake_case : Tuple="val" , snake_case : int=None , snake_case : Tuple=False , snake_case : Optional[int]="summarization" , snake_case : int=None , snake_case : str=1 , snake_case : Dict = None , snake_case : Union[str, Any]="" , **snake_case : Tuple , )-> List[str]: '''simple docstring''' UpperCAmelCase__ : Any = str(snake_case ) assert local_rank is not None torch.distributed.init_process_group(backend="nccl" , rank=snake_case ) UpperCAmelCase__ : Dict = Path(snake_case ) UpperCAmelCase__ : Any = save_dir.joinpath(f'rank_{local_rank}_output.json' ) torch.cuda.set_device(snake_case ) UpperCAmelCase__ : Optional[int] = AutoModelForSeqaSeqLM.from_pretrained(snake_case ).cuda() if fpaa: UpperCAmelCase__ : Optional[Any] = model.half() # determine if we need to increase num_beams use_task_specific_params(snake_case , snake_case ) # update config with task specific params UpperCAmelCase__ : List[str] = generate_kwargs.pop("num_beams" , model.config.num_beams ) # AttributeError risk? if num_return_sequences > num_beams: UpperCAmelCase__ : Tuple = num_return_sequences UpperCAmelCase__ : int = AutoTokenizer.from_pretrained(snake_case ) logger.info(f'Inferred tokenizer type: {tokenizer.__class__}' ) # if this is wrong, check config.model_type. if max_source_length is None: UpperCAmelCase__ : List[str] = tokenizer.model_max_length if prefix is None: UpperCAmelCase__ : List[str] = prefix or getattr(model.config , "prefix" , "" ) or '' UpperCAmelCase__ : Optional[Any] = SeqaSeqDataset( snake_case , snake_case , snake_case , max_target_length=1024 , type_path=snake_case , n_obs=snake_case , prefix=snake_case , **snake_case , ) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. 
UpperCAmelCase__ : str = ds.make_sortish_sampler(snake_case , distributed=snake_case , add_extra_examples=snake_case , shuffle=snake_case ) UpperCAmelCase__ : Union[str, Any] = DataLoader(snake_case , sampler=snake_case , batch_size=snake_case , collate_fn=ds.collate_fn ) UpperCAmelCase__ : List[str] = [] for batch in tqdm(snake_case ): UpperCAmelCase__ : Optional[Any] = model.generate( input_ids=batch["input_ids"].to(model.device ) , attention_mask=batch["attention_mask"].to(model.device ) , num_return_sequences=snake_case , num_beams=snake_case , **snake_case , ) UpperCAmelCase__ : List[str] = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case , clean_up_tokenization_spaces=snake_case ) UpperCAmelCase__ : str = batch['ids'] if num_return_sequences > 1: UpperCAmelCase__ : Optional[Any] = chunks(snake_case , snake_case ) # batch size chunks, each of size num_return_seq for i, pred in enumerate(snake_case ): results.append({"pred": pred, "id": ids[i].item()} ) save_json(snake_case , snake_case ) return results, sampler.num_replicas def SCREAMING_SNAKE_CASE__ ( )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ : Any = argparse.ArgumentParser( epilog="Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate" ) parser.add_argument("--data_dir" , type=snake_case , help="like cnn_dm/test.source" ) parser.add_argument( "--model_name" , type=snake_case , help="like facebook/bart-large-cnn,t5-base, etc." , default="sshleifer/distilbart-xsum-12-3" , ) parser.add_argument("--save_dir" , type=snake_case , help="where to save" , default="tmp_gen" ) parser.add_argument("--max_source_length" , type=snake_case , default=snake_case ) parser.add_argument( "--type_path" , type=snake_case , default="test" , help="which subset to evaluate typically train/val/test" ) parser.add_argument("--task" , type=snake_case , default="summarization" , help="used for task_specific_params + metrics" ) parser.add_argument("--bs" , type=snake_case , default=8 , required=snake_case , help="batch size" ) parser.add_argument( "--local_rank" , type=snake_case , default=-1 , required=snake_case , help="should be passed by distributed.launch" ) parser.add_argument( "--n_obs" , type=snake_case , default=snake_case , required=snake_case , help="How many observations. Defaults to all." ) parser.add_argument( "--num_return_sequences" , type=snake_case , default=1 , required=snake_case , help="How many sequences to return" ) parser.add_argument( "--sync_timeout" , type=snake_case , default=600 , required=snake_case , help="How long should master process wait for other processes to finish." , ) parser.add_argument("--src_lang" , type=snake_case , default=snake_case , required=snake_case ) parser.add_argument("--tgt_lang" , type=snake_case , default=snake_case , required=snake_case ) parser.add_argument( "--prefix" , type=snake_case , required=snake_case , default=snake_case , help="will be added to the begininng of src examples" ) parser.add_argument("--fp16" , action="store_true" ) parser.add_argument("--debug" , action="store_true" ) UpperCAmelCase__ : List[str] = time.time() UpperCAmelCase__ : Dict = parser.parse_known_args() UpperCAmelCase__ : List[str] = parse_numeric_n_bool_cl_kwargs(snake_case ) if generate_kwargs and args.local_rank <= 0: print(f'parsed the following generate kwargs: {generate_kwargs}' ) UpperCAmelCase__ : List[Any] = Path(args.save_dir + "_tmp" ) Path(snake_case ).mkdir(exist_ok=snake_case ) # this handles locking. 
UpperCAmelCase__ : Tuple = list(json_save_dir.glob("rank_*.json" ) ) if intermediate_files: raise ValueError(f'Found files at {json_save_dir} please move or remove them.' ) # In theory, a node could finish and save before another node hits this. If this happens, we can address later. UpperCAmelCase__ : Optional[int] = {} if args.src_lang is not None: UpperCAmelCase__ : Union[str, Any] = args.src_lang if args.tgt_lang is not None: UpperCAmelCase__ : Any = args.tgt_lang Path(args.save_dir ).mkdir(exist_ok=snake_case ) UpperCAmelCase__ : str = eval_data_dir( args.data_dir , snake_case , args.model_name , type_path=args.type_path , bs=args.bs , fpaa=args.fpaa , task=args.task , local_rank=args.local_rank , n_obs=args.n_obs , max_source_length=args.max_source_length , num_return_sequences=args.num_return_sequences , prefix=args.prefix , dataset_kwargs=snake_case , **snake_case , ) if args.local_rank <= 0: UpperCAmelCase__ : int = Path(args.save_dir ) save_dir.mkdir(exist_ok=snake_case ) UpperCAmelCase__ : int = gather_results_from_each_node(snake_case , snake_case , args.sync_timeout ) UpperCAmelCase__ : List[str] = combine_partial_results(snake_case ) if args.num_return_sequences > 1: UpperCAmelCase__ : Dict = save_dir.joinpath("pseudolabel_results.json" ) print(f'Saving aggregated results at {save_path}, intermediate in {json_save_dir}/' ) save_json(snake_case , snake_case ) return UpperCAmelCase__ : List[Any] = Path(args.data_dir ).joinpath(args.type_path + ".target" ) with open(snake_case ) as f: UpperCAmelCase__ : str = [x.rstrip() for x in f.readlines()][: len(snake_case )] # Calculate metrics, save metrics, and save _generations.txt UpperCAmelCase__ : Optional[int] = 'translation' in args.task UpperCAmelCase__ : int = calculate_bleu if calc_bleu else calculate_rouge UpperCAmelCase__ : Optional[int] = 'bleu' if calc_bleu else 'rouge' UpperCAmelCase__ : Dict = score_fn(snake_case , snake_case ) UpperCAmelCase__ : Tuple = len(snake_case ) UpperCAmelCase__ : int = time.time() - start_time UpperCAmelCase__ : Tuple = round(runtime / metrics["n_obs"] , 4 ) UpperCAmelCase__ : int = num_replicas # TODO(@stas00): add whatever metadata to metrics UpperCAmelCase__ : Union[str, Any] = save_dir.joinpath(f'{args.type_path}_{metric_name}.json' ) save_json(snake_case , snake_case , indent=snake_case ) print(snake_case ) write_txt_file(snake_case , save_dir.joinpath(f'{args.type_path}_generations.txt' ) ) if args.debug: write_txt_file(snake_case , save_dir.joinpath(f'{args.type_path}.target' ) ) else: shutil.rmtree(snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] )-> int: '''simple docstring''' UpperCAmelCase__ : Dict = [] for partial_result in partial_results: records.extend(snake_case ) UpperCAmelCase__ : Any = sorted(snake_case , key=lambda snake_case : x["id"] ) UpperCAmelCase__ : List[Any] = [x['pred'] for x in records] return preds def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : Any , snake_case : List[str] )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = time.time() logger.info("waiting for all nodes to finish" ) UpperCAmelCase__ : str = None while (time.time() - start_wait) < timeout: UpperCAmelCase__ : List[str] = list(save_dir.glob("rank_*.json" ) ) if len(snake_case ) < num_replicas: continue try: # make sure all json files are fully saved UpperCAmelCase__ : List[str] = lmap(snake_case , snake_case ) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes" ) # Unreachable if __name__ 
== "__main__": # Usage for MT: run_generate()
356
"""simple docstring""" import os import re from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging _lowerCAmelCase : Optional[int] = logging.get_logger(__name__) _lowerCAmelCase : Optional[int] = { """vocab_file""": """vocab.txt""", """merges_file""": """bpe.codes""", } _lowerCAmelCase : List[Any] = { """vocab_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt""", }, """merges_file""": { """vinai/phobert-base""": """https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes""", """vinai/phobert-large""": """https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes""", }, } _lowerCAmelCase : int = { """vinai/phobert-base""": 256, """vinai/phobert-large""": 256, } def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = set() UpperCAmelCase__ : Optional[int] = word[0] for char in word[1:]: pairs.add((prev_char, char) ) UpperCAmelCase__ : Dict = char UpperCAmelCase__ : Tuple = set(snake_case ) return pairs class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES def __init__( self : List[Any] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : Tuple="<s>" , snake_case__ : List[Any]="</s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : Union[str, Any]="<s>" , snake_case__ : Any="<unk>" , snake_case__ : int="<pad>" , snake_case__ : List[str]="<mask>" , **snake_case__ : Optional[int] , ): '''simple docstring''' super().__init__( bos_token=snake_case__ , eos_token=snake_case__ , unk_token=snake_case__ , sep_token=snake_case__ , cls_token=snake_case__ , pad_token=snake_case__ , mask_token=snake_case__ , **snake_case__ , ) UpperCAmelCase__ : Dict = vocab_file UpperCAmelCase__ : Tuple = merges_file UpperCAmelCase__ : List[Any] = {} UpperCAmelCase__ : Dict = 0 UpperCAmelCase__ : int = 1 UpperCAmelCase__ : Dict = 2 UpperCAmelCase__ : Dict = 3 self.add_from_file(snake_case__ ) UpperCAmelCase__ : Optional[Any] = {v: k for k, v in self.encoder.items()} with open(snake_case__ , encoding="utf-8" ) as merges_handle: UpperCAmelCase__ : Tuple = merges_handle.read().split("\n" )[:-1] UpperCAmelCase__ : Optional[Any] = [tuple(merge.split()[:-1] ) for merge in merges] UpperCAmelCase__ : List[Any] = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase__ : Dict = {} def __a ( self : int , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] UpperCAmelCase__ : Any = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __a ( self : List[str] , snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None , snake_case__ : bool = False ): '''simple docstring''' if already_has_special_tokens: return super().get_special_tokens_mask( token_ids_a=snake_case__ , token_ids_a=snake_case__ , already_has_special_tokens=snake_case__ ) if token_ids_a is None: return [1] + ([0] * len(snake_case__ )) + [1] return [1] + ([0] * len(snake_case__ )) + [1, 1] + ([0] * len(snake_case__ )) + [1] def __a ( self : Union[str, Any] , 
snake_case__ : List[int] , snake_case__ : Optional[List[int]] = None ): '''simple docstring''' UpperCAmelCase__ : Tuple = [self.sep_token_id] UpperCAmelCase__ : str = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] @property def __a ( self : List[str] ): '''simple docstring''' return len(self.encoder ) def __a ( self : Any ): '''simple docstring''' return dict(self.encoder , **self.added_tokens_encoder ) def __a ( self : Dict , snake_case__ : Tuple ): '''simple docstring''' if token in self.cache: return self.cache[token] UpperCAmelCase__ : Optional[Any] = tuple(snake_case__ ) UpperCAmelCase__ : Optional[Any] = tuple(list(word[:-1] ) + [word[-1] + "</w>"] ) UpperCAmelCase__ : Any = get_pairs(snake_case__ ) if not pairs: return token while True: UpperCAmelCase__ : List[Any] = min(snake_case__ , key=lambda snake_case__ : self.bpe_ranks.get(snake_case__ , float("inf" ) ) ) if bigram not in self.bpe_ranks: break UpperCAmelCase__ , UpperCAmelCase__ : Tuple = bigram UpperCAmelCase__ : Optional[Any] = [] UpperCAmelCase__ : Tuple = 0 while i < len(snake_case__ ): try: UpperCAmelCase__ : Union[str, Any] = word.index(snake_case__ , snake_case__ ) except ValueError: new_word.extend(word[i:] ) break else: new_word.extend(word[i:j] ) UpperCAmelCase__ : Dict = j if word[i] == first and i < len(snake_case__ ) - 1 and word[i + 1] == second: new_word.append(first + second ) i += 2 else: new_word.append(word[i] ) i += 1 UpperCAmelCase__ : Dict = tuple(snake_case__ ) UpperCAmelCase__ : List[Any] = new_word if len(snake_case__ ) == 1: break else: UpperCAmelCase__ : Dict = get_pairs(snake_case__ ) UpperCAmelCase__ : List[Any] = "@@ ".join(snake_case__ ) UpperCAmelCase__ : Optional[int] = word[:-4] UpperCAmelCase__ : Union[str, Any] = word return word def __a ( self : List[Any] , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = [] UpperCAmelCase__ : int = re.findall(R"\S+\n?" 
, snake_case__ ) for token in words: split_tokens.extend(list(self.bpe(snake_case__ ).split(" " ) ) ) return split_tokens def __a ( self : Dict , snake_case__ : List[str] ): '''simple docstring''' return self.encoder.get(snake_case__ , self.encoder.get(self.unk_token ) ) def __a ( self : List[Any] , snake_case__ : Any ): '''simple docstring''' return self.decoder.get(snake_case__ , self.unk_token ) def __a ( self : str , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = " ".join(snake_case__ ).replace("@@ " , "" ).strip() return out_string def __a ( self : Any , snake_case__ : str , snake_case__ : Optional[str] = None ): '''simple docstring''' if not os.path.isdir(snake_case__ ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return UpperCAmelCase__ : Tuple = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : str = os.path.join( snake_case__ , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case__ ): copyfile(self.vocab_file , snake_case__ ) if os.path.abspath(self.merges_file ) != os.path.abspath(snake_case__ ): copyfile(self.merges_file , snake_case__ ) return out_vocab_file, out_merge_file def __a ( self : List[Any] , snake_case__ : Union[str, Any] ): '''simple docstring''' if isinstance(snake_case__ , snake_case__ ): try: with open(snake_case__ , "r" , encoding="utf-8" ) as fd: self.add_from_file(snake_case__ ) except FileNotFoundError as fnfe: raise fnfe except UnicodeError: raise Exception(f'Incorrect encoding detected in {f}, please rebuild the dataset' ) return UpperCAmelCase__ : Dict = f.readlines() for lineTmp in lines: UpperCAmelCase__ : Optional[int] = lineTmp.strip() UpperCAmelCase__ : Tuple = line.rfind(" " ) if idx == -1: raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'" ) UpperCAmelCase__ : Any = line[:idx] UpperCAmelCase__ : str = len(self.encoder )
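The pair-collection helper at the top of this file gathers adjacent symbol pairs, the unit BPE merges operate on. An equivalent one-liner for intuition:

word = ("l", "o", "w", "er</w>")
pairs = set(zip(word, word[1:]))
assert pairs == {("l", "o"), ("o", "w"), ("w", "er</w>")}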
298
0
"""simple docstring""" import os from bleurt import score # From: git+https://github.com/google-research/bleurt.git import datasets _lowerCAmelCase : Dict = datasets.logging.get_logger(__name__) _lowerCAmelCase : Dict = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n" _lowerCAmelCase : Tuple = "\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n" _lowerCAmelCase : Any = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n" _lowerCAmelCase : Any = { "bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip", "bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip", "bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip", "bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip", "bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip", "bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip", "BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip", "BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip", "BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip", "BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip", } @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class lowerCAmelCase__ ( datasets.Metric ): def __a ( self : Optional[Any] ): '''simple docstring''' return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/google-research/bleurt" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { "predictions": datasets.Value("string" , id="sequence" ), "references": datasets.Value("string" , id="sequence" ), } ) , codebase_urls=["https://github.com/google-research/bleurt"] , reference_urls=["https://github.com/google-research/bleurt", "https://arxiv.org/abs/2004.04696"] , ) def __a ( self : Optional[int] , snake_case__ : int ): '''simple docstring''' if self.config_name == "default": logger.warning( "Using default BLEURT-Base checkpoint for sequence maximum length 128. " "You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\')." 
) UpperCAmelCase__ : List[str] = '''bleurt-base-128''' if self.config_name.lower() in CHECKPOINT_URLS: UpperCAmelCase__ : Tuple = self.config_name.lower() elif self.config_name.upper() in CHECKPOINT_URLS: UpperCAmelCase__ : Union[str, Any] = self.config_name.upper() else: raise KeyError( f'{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}' ) # download the model checkpoint specified by self.config_name and set up the scorer UpperCAmelCase__ : Dict = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] ) UpperCAmelCase__ : Any = score.BleurtScorer(os.path.join(lowerCamelCase__ , lowerCamelCase__ ) ) def __a ( self : Union[str, Any] , snake_case__ : Tuple , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.scorer.score(references=lowerCamelCase__ , candidates=lowerCamelCase__ ) return {"scores": scores}
357
"""simple docstring""" # DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class lowerCAmelCase__ : SCREAMING_SNAKE_CASE_ =42 # setable values SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =None @classmethod def __a ( cls : Optional[int] , snake_case__ : CommonSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray ): '''simple docstring''' return cls(common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ ) @dataclass class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ =42 class lowerCAmelCase__ ( __magic_name__ , __magic_name__ ): SCREAMING_SNAKE_CASE_ =[e.name for e in FlaxKarrasDiffusionSchedulers] SCREAMING_SNAKE_CASE_ =42 @property def __a ( self : Union[str, Any] ): '''simple docstring''' return True @register_to_config def __init__( self : Tuple , snake_case__ : int = 1_0_0_0 , snake_case__ : float = 0.0001 , snake_case__ : float = 0.02 , snake_case__ : str = "linear" , snake_case__ : Optional[jnp.ndarray] = None , snake_case__ : str = "fixed_small" , snake_case__ : bool = True , snake_case__ : str = "epsilon" , snake_case__ : jnp.dtype = jnp.floataa , ): '''simple docstring''' UpperCAmelCase__ : Tuple = dtype def __a ( self : Any , snake_case__ : Optional[CommonSchedulerState] = None ): '''simple docstring''' if common is None: UpperCAmelCase__ : Any = CommonSchedulerState.create(self ) # standard deviation of the initial noise distribution UpperCAmelCase__ : Tuple = jnp.array(1.0 , dtype=self.dtype ) UpperCAmelCase__ : Optional[Any] = jnp.arange(0 , self.config.num_train_timesteps ).round()[::-1] return DDPMSchedulerState.create( common=snake_case__ , init_noise_sigma=snake_case__ , timesteps=snake_case__ , ) def __a ( self : int , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : Optional[int] = None ): '''simple docstring''' return sample def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Tuple = () ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 UpperCAmelCase__ : Tuple = (jnp.arange(0 , snake_case__ ) * step_ratio).round()[::-1] return state.replace( num_inference_steps=snake_case__ , timesteps=snake_case__ , ) def __a ( self : List[str] , snake_case__ : DDPMSchedulerState , snake_case__ : int , snake_case__ : Any=None , snake_case__ : Union[str, Any]=None ): '''simple docstring''' UpperCAmelCase__ : int = state.common.alphas_cumprod[t] UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample UpperCAmelCase__ : int = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: UpperCAmelCase__ : Union[str, 
Any] = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": UpperCAmelCase__ : int = jnp.clip(snake_case__ , a_min=1e-20 ) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": UpperCAmelCase__ : Union[str, Any] = jnp.log(jnp.clip(snake_case__ , a_min=1e-20 ) ) elif variance_type == "fixed_large": UpperCAmelCase__ : List[Any] = state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log UpperCAmelCase__ : Optional[int] = jnp.log(state.common.betas[t] ) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": UpperCAmelCase__ : List[str] = variance UpperCAmelCase__ : Optional[Any] = state.common.betas[t] UpperCAmelCase__ : Any = (predicted_variance + 1) / 2 UpperCAmelCase__ : Dict = frac * max_log + (1 - frac) * min_log return variance def __a ( self : Dict , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : int , snake_case__ : jnp.ndarray , snake_case__ : Optional[jax.random.KeyArray] = None , snake_case__ : bool = True , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = timestep if key is None: UpperCAmelCase__ : Optional[int] = jax.random.PRNGKey(0 ) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: UpperCAmelCase__ , UpperCAmelCase__ : Union[str, Any] = jnp.split(snake_case__ , sample.shape[1] , axis=1 ) else: UpperCAmelCase__ : int = None # 1. compute alphas, betas UpperCAmelCase__ : Union[str, Any] = state.common.alphas_cumprod[t] UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype ) ) UpperCAmelCase__ : List[str] = 1 - alpha_prod_t UpperCAmelCase__ : List[str] = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": UpperCAmelCase__ : Optional[int] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": UpperCAmelCase__ : List[Any] = model_output elif self.config.prediction_type == "v_prediction": UpperCAmelCase__ : int = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( f'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` ' " for the FlaxDDPMScheduler." ) # 3. Clip "predicted x_0" if self.config.clip_sample: UpperCAmelCase__ : Optional[Any] = jnp.clip(snake_case__ , -1 , 1 ) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ : Union[str, Any] = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t UpperCAmelCase__ : Tuple = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf UpperCAmelCase__ : Union[str, Any] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise def random_variance(): UpperCAmelCase__ : List[str] = jax.random.split(snake_case__ , num=1 ) UpperCAmelCase__ : List[str] = jax.random.normal(snake_case__ , shape=model_output.shape , dtype=self.dtype ) return (self._get_variance(snake_case__ , snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise UpperCAmelCase__ : Optional[int] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype ) ) UpperCAmelCase__ : Optional[Any] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=snake_case__ , state=snake_case__ ) def __a ( self : List[Any] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ): '''simple docstring''' return add_noise_common(state.common , snake_case__ , snake_case__ , snake_case__ ) def __a ( self : Optional[int] , snake_case__ : DDPMSchedulerState , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , snake_case__ : jnp.ndarray , ): '''simple docstring''' return get_velocity_common(state.common , snake_case__ , snake_case__ , snake_case__ ) def __len__( self : Union[str, Any] ): '''simple docstring''' return self.config.num_train_timesteps
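The "fixed_small" branch of the variance method implements the DDPM posterior variance β̃ₜ = (1 − ᾱₜ₋₁)/(1 − ᾱₜ)·βₜ. A NumPy sketch for a toy linear beta schedule (values illustrative only):

import numpy as np

betas = np.linspace(1e-4, 0.02, 1000)
alphas_cumprod = np.cumprod(1.0 - betas)

t = 500
variance = (1 - alphas_cumprod[t - 1]) / (1 - alphas_cumprod[t]) * betas[t]
variance = max(variance, 1e-20)  # the fixed_small clamp above
print(variance)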
298
0
"""simple docstring""" import pytest import datasets # Import fixture modules as plugins _lowerCAmelCase : Dict = ["""tests.fixtures.files""", """tests.fixtures.hub""", """tests.fixtures.fsspec"""] def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : str )-> Optional[int]: '''simple docstring''' for item in items: if any(marker in item.keywords for marker in ["integration", "unit"] ): continue item.add_marker(pytest.mark.unit ) def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] )-> Any: '''simple docstring''' config.addinivalue_line("markers" , "torchaudio_latest: mark test to run with torchaudio>=0.12" ) @pytest.fixture(autouse=A__ ) def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Optional[int] )-> Optional[int]: '''simple docstring''' UpperCAmelCase__ : Optional[int] = tmp_path_factory.getbasetemp() / "cache" UpperCAmelCase__ : Dict = test_hf_cache_home / "datasets" UpperCAmelCase__ : List[str] = test_hf_cache_home / "metrics" UpperCAmelCase__ : int = test_hf_cache_home / "modules" monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE" , str(A__ ) ) monkeypatch.setattr("datasets.config.HF_METRICS_CACHE" , str(A__ ) ) monkeypatch.setattr("datasets.config.HF_MODULES_CACHE" , str(A__ ) ) UpperCAmelCase__ : List[Any] = test_hf_datasets_cache / "downloads" monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH" , str(A__ ) ) UpperCAmelCase__ : List[Any] = test_hf_datasets_cache / "downloads" / "extracted" monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(A__ ) ) @pytest.fixture(autouse=A__ , scope="session" ) def SCREAMING_SNAKE_CASE__ ( )-> Dict: '''simple docstring''' datasets.disable_progress_bar() @pytest.fixture(autouse=A__ ) def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> List[Any]: '''simple docstring''' monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS" , A__ ) @pytest.fixture def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> Optional[int]: '''simple docstring''' monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING" , A__ )
358
"""simple docstring""" import inspect import unittest from transformers import DecisionTransformerConfig, is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from ...generation.test_utils import GenerationTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import DecisionTransformerModel from transformers.models.decision_transformer.modeling_decision_transformer import ( DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ) class lowerCAmelCase__ : def __init__( self : str , snake_case__ : Optional[Any] , snake_case__ : List[Any]=1_3 , snake_case__ : str=7 , snake_case__ : Optional[int]=6 , snake_case__ : Union[str, Any]=1_7 , snake_case__ : Optional[Any]=2_3 , snake_case__ : int=1_1 , snake_case__ : Dict=True , ): '''simple docstring''' UpperCAmelCase__ : str = parent UpperCAmelCase__ : Tuple = batch_size UpperCAmelCase__ : Dict = seq_length UpperCAmelCase__ : Union[str, Any] = act_dim UpperCAmelCase__ : Dict = state_dim UpperCAmelCase__ : Optional[Any] = hidden_size UpperCAmelCase__ : List[str] = max_length UpperCAmelCase__ : int = is_training def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, self.state_dim) ) UpperCAmelCase__ : List[Any] = floats_tensor((self.batch_size, self.seq_length, self.act_dim) ) UpperCAmelCase__ : Union[str, Any] = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase__ : Optional[int] = floats_tensor((self.batch_size, self.seq_length, 1) ) UpperCAmelCase__ : int = ids_tensor((self.batch_size, self.seq_length) , vocab_size=1_0_0_0 ) UpperCAmelCase__ : Optional[int] = random_attention_mask((self.batch_size, self.seq_length) ) UpperCAmelCase__ : Optional[int] = self.get_config() return ( config, states, actions, rewards, returns_to_go, timesteps, attention_mask, ) def __a ( self : int ): '''simple docstring''' return DecisionTransformerConfig( batch_size=self.batch_size , seq_length=self.seq_length , act_dim=self.act_dim , state_dim=self.state_dim , hidden_size=self.hidden_size , max_length=self.max_length , ) def __a ( self : Optional[Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Optional[int] , ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) self.parent.assertEqual(result.state_preds.shape , states.shape ) self.parent.assertEqual(result.action_preds.shape , actions.shape ) self.parent.assertEqual(result.return_preds.shape , returns_to_go.shape ) self.parent.assertEqual( result.last_hidden_state.shape , (self.batch_size, self.seq_length * 3, self.hidden_size) ) # seq length *3 as there are 3 modelities: states, returns and actions def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[int] = config_and_inputs UpperCAmelCase__ : 
Optional[int] = { "states": states, "actions": actions, "rewards": rewards, "returns_to_go": returns_to_go, "timesteps": timesteps, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =(DecisionTransformerModel,) if is_torch_available() else () SCREAMING_SNAKE_CASE_ =() SCREAMING_SNAKE_CASE_ ={'''feature-extraction''': DecisionTransformerModel} if is_torch_available() else {} # Ignoring of a failing test from GenerationTesterMixin, as the model does not use inputs_ids SCREAMING_SNAKE_CASE_ =False # Ignoring of a failing tests from ModelTesterMixin, as the model does not implement these features SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = DecisionTransformerModelTester(self ) UpperCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 ) def __a ( self : List[Any] ): '''simple docstring''' self.config_tester.run_common_tests() def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) @slow def __a ( self : List[str] ): '''simple docstring''' for model_name in DECISION_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Tuple = DecisionTransformerModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Dict = model_class(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : Tuple = [*signature.parameters.keys()] UpperCAmelCase__ : str = [ "states", "actions", "rewards", "returns_to_go", "timesteps", "attention_mask", ] self.assertListEqual(arg_names[: len(snake_case__ )] , snake_case__ ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = 2 # number of steps of autoregressive prediction we will perform UpperCAmelCase__ : Tuple = 1_0 # defined by the RL environment, may be normalized UpperCAmelCase__ : Optional[Any] = DecisionTransformerModel.from_pretrained("edbeeching/decision-transformer-gym-hopper-expert" ) UpperCAmelCase__ : Any = model.to(snake_case__ ) UpperCAmelCase__ : Optional[int] = model.config torch.manual_seed(0 ) UpperCAmelCase__ : Optional[int] = torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ) # env.reset() UpperCAmelCase__ : Optional[Any] = torch.tensor( [[0.24_2793, -0.2869_3074, 0.874_2613], [0.6781_5274, -0.0810_1085, -0.1295_2147]] , device=snake_case__ ) UpperCAmelCase__ : List[str] = torch.tensor(snake_case__ , device=snake_case__ , dtype=torch.floataa ).reshape(1 , 1 , 1 ) UpperCAmelCase__ : Union[str, Any] = state UpperCAmelCase__ : Dict = torch.zeros(1 , 0 , config.act_dim , device=snake_case__ , dtype=torch.floataa ) UpperCAmelCase__ : Any = torch.zeros(1 , 0 , 
device=snake_case__ , dtype=torch.floataa ) UpperCAmelCase__ : Optional[int] = torch.tensor(0 , device=snake_case__ , dtype=torch.long ).reshape(1 , 1 ) for step in range(snake_case__ ): UpperCAmelCase__ : List[Any] = torch.cat([actions, torch.zeros(1 , 1 , config.act_dim , device=snake_case__ )] , dim=1 ) UpperCAmelCase__ : Optional[int] = torch.cat([rewards, torch.zeros(1 , 1 , device=snake_case__ )] , dim=1 ) UpperCAmelCase__ : Dict = torch.ones(1 , states.shape[1] ).to(dtype=torch.long , device=states.device ) with torch.no_grad(): UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = model( states=snake_case__ , actions=snake_case__ , rewards=snake_case__ , returns_to_go=snake_case__ , timesteps=snake_case__ , attention_mask=snake_case__ , return_dict=snake_case__ , ) self.assertEqual(action_pred.shape , actions.shape ) self.assertTrue(torch.allclose(action_pred[0, -1] , expected_outputs[step] , atol=1e-4 ) ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : str = ( # env.step(action) torch.randn(1 , 1 , config.state_dim ).to(device=snake_case__ , dtype=torch.floataa ), 1.0, False, {}, ) UpperCAmelCase__ : Union[str, Any] = action_pred[0, -1] UpperCAmelCase__ : int = torch.cat([states, state] , dim=1 ) UpperCAmelCase__ : Dict = returns_to_go[0, -1] - reward UpperCAmelCase__ : Optional[Any] = torch.cat([returns_to_go, pred_return.reshape(1 , 1 , 1 )] , dim=1 ) UpperCAmelCase__ : Tuple = torch.cat( [timesteps, torch.ones((1, 1) , device=snake_case__ , dtype=torch.long ) * (step + 1)] , dim=1 )
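The seq_length * 3 in the shape check above comes from how the decision transformer interleaves its three modalities; schematically (an illustration, not code from the test):

# Per timestep t the model stacks (return_t, state_t, action_t), so the
# hidden sequence is
#   [R_1, s_1, a_1, R_2, s_2, a_2, ..., R_T, s_T, a_T]
# i.e. 3 * seq_length positions, each of width hidden_size.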
298
0
"""simple docstring""" from math import factorial class lowerCAmelCase__ : def __init__( self : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : Tuple = real if isinstance(snake_case__ , snake_case__ ): UpperCAmelCase__ : Dict = [1] * rank else: UpperCAmelCase__ : str = rank def __repr__( self : List[str] ): '''simple docstring''' return ( f'{self.real}+' f'{"+".join(str(snake_case__ )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}' ) def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = self.duals.copy() while cur[-1] == 0: cur.pop(-1 ) return Dual(self.real , snake_case__ ) def __add__( self : List[Any] , snake_case__ : Dict ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): return Dual(self.real + other , self.duals ) UpperCAmelCase__ : Tuple = self.duals.copy() UpperCAmelCase__ : Optional[int] = other.duals.copy() if len(snake_case__ ) > len(snake_case__ ): o_dual.extend([1] * (len(snake_case__ ) - len(snake_case__ )) ) elif len(snake_case__ ) < len(snake_case__ ): s_dual.extend([1] * (len(snake_case__ ) - len(snake_case__ )) ) UpperCAmelCase__ : int = [] for i in range(len(snake_case__ ) ): new_duals.append(s_dual[i] + o_dual[i] ) return Dual(self.real + other.real , snake_case__ ) SCREAMING_SNAKE_CASE_ =__add__ def __sub__( self : Optional[Any] , snake_case__ : List[Any] ): '''simple docstring''' return self + other * -1 def __mul__( self : Optional[int] , snake_case__ : Any ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): UpperCAmelCase__ : Optional[int] = [] for i in self.duals: new_duals.append(i * other ) return Dual(self.real * other , snake_case__ ) UpperCAmelCase__ : Optional[int] = [0] * (len(self.duals ) + len(other.duals ) + 1) for i, item in enumerate(self.duals ): for j, jtem in enumerate(other.duals ): new_duals[i + j + 1] += item * jtem for k in range(len(self.duals ) ): new_duals[k] += self.duals[k] * other.real for index in range(len(other.duals ) ): new_duals[index] += other.duals[index] * self.real return Dual(self.real * other.real , snake_case__ ) SCREAMING_SNAKE_CASE_ =__mul__ def __truediv__( self : Any , snake_case__ : str ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): UpperCAmelCase__ : List[Any] = [] for i in self.duals: new_duals.append(i / other ) return Dual(self.real / other , snake_case__ ) raise ValueError def __floordiv__( self : List[str] , snake_case__ : Optional[Any] ): '''simple docstring''' if not isinstance(snake_case__ , snake_case__ ): UpperCAmelCase__ : Dict = [] for i in self.duals: new_duals.append(i // other ) return Dual(self.real // other , snake_case__ ) raise ValueError def __pow__( self : Dict , snake_case__ : Union[str, Any] ): '''simple docstring''' if n < 0 or isinstance(snake_case__ , snake_case__ ): raise ValueError("power must be a positive integer" ) if n == 0: return 1 if n == 1: return self UpperCAmelCase__ : Dict = self for _ in range(n - 1 ): x *= self return x def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Any , snake_case : Any )-> List[str]: '''simple docstring''' if not callable(_UpperCAmelCase ): raise ValueError("differentiate() requires a function as input for func" ) if not isinstance(_UpperCAmelCase , (float, int) ): raise ValueError("differentiate() requires a float as input for position" ) if not isinstance(_UpperCAmelCase , _UpperCAmelCase ): raise ValueError("differentiate() requires an int as input for order" ) 
UpperCAmelCase__ : str = Dual(_UpperCAmelCase , 1 ) UpperCAmelCase__ : Tuple = func(_UpperCAmelCase ) if order == 0: return result.real return result.duals[order - 1] * factorial(_UpperCAmelCase ) if __name__ == "__main__": import doctest doctest.testmod() def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> int: '''simple docstring''' return y**2 * y**4 print(differentiate(f, 9, 2))
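The driver above exercises forward-mode automatic differentiation: Dual(position, 1) seeds the first-order coefficient, and duals[order - 1] * factorial(order) converts the stored Taylor coefficient back into a derivative. A quick hand-check of the printed value:

# f(y) = y**6, so f''(y) = 30 * y**4 and f''(9) = 30 * 9**4 = 196830.
# differentiate reads the second Taylor coefficient C(6, 2) * 9**4 = 98415
# and multiplies it by 2!, giving the same integer.
def f(y):
    return y**2 * y**4  # i.e. y**6


assert differentiate(f, 9, 2) == 30 * 9**4  # 196830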
359
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available _lowerCAmelCase : Tuple = {} try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: _lowerCAmelCase : Dict = ["""MLukeTokenizer"""] if TYPE_CHECKING: try: if not is_sentencepiece_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_mluke import MLukeTokenizer else: import sys _lowerCAmelCase : List[str] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
298
0
"""simple docstring""" from __future__ import annotations from typing import Any class lowerCAmelCase__ : def __init__( self : Dict , snake_case__ : int , snake_case__ : int , snake_case__ : float = 0 ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = row, column UpperCAmelCase__ : List[str] = [[default_value for c in range(__UpperCamelCase )] for r in range(__UpperCamelCase )] def __str__( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = f'Matrix consist of {self.row} rows and {self.column} columns\n' # Make string identifier UpperCAmelCase__ : List[Any] = 0 for row_vector in self.array: for obj in row_vector: UpperCAmelCase__ : Union[str, Any] = max(__UpperCamelCase , len(str(__UpperCamelCase ) ) ) UpperCAmelCase__ : str = f'%{max_element_length}s' # Make string and return def single_line(snake_case__ : list[float] ) -> str: nonlocal string_format_identifier UpperCAmelCase__ : List[str] = "[" line += ", ".join(string_format_identifier % (obj,) for obj in row_vector ) line += "]" return line s += "\n".join(single_line(__UpperCamelCase ) for row_vector in self.array ) return s def __repr__( self : Union[str, Any] ): '''simple docstring''' return str(self ) def __a ( self : Tuple , snake_case__ : tuple[int, int] ): '''simple docstring''' if not (isinstance(__UpperCamelCase , (list, tuple) ) and len(__UpperCamelCase ) == 2): return False elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column): return False else: return True def __getitem__( self : Tuple , snake_case__ : tuple[int, int] ): '''simple docstring''' assert self.validate_indicies(__UpperCamelCase ) return self.array[loc[0]][loc[1]] def __setitem__( self : Dict , snake_case__ : tuple[int, int] , snake_case__ : float ): '''simple docstring''' assert self.validate_indicies(__UpperCamelCase ) UpperCAmelCase__ : Any = value def __add__( self : Dict , snake_case__ : Matrix ): '''simple docstring''' assert isinstance(__UpperCamelCase , __UpperCamelCase ) assert self.row == another.row and self.column == another.column # Add UpperCAmelCase__ : List[str] = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): UpperCAmelCase__ : Dict = self[r, c] + another[r, c] return result def __neg__( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): UpperCAmelCase__ : Union[str, Any] = -self[r, c] return result def __sub__( self : str , snake_case__ : Matrix ): '''simple docstring''' return self + (-another) def __mul__( self : Union[str, Any] , snake_case__ : int | float | Matrix ): '''simple docstring''' if isinstance(__UpperCamelCase , (int, float) ): # Scalar multiplication UpperCAmelCase__ : Union[str, Any] = Matrix(self.row , self.column ) for r in range(self.row ): for c in range(self.column ): UpperCAmelCase__ : Optional[Any] = self[r, c] * another return result elif isinstance(__UpperCamelCase , __UpperCamelCase ): # Matrix multiplication assert self.column == another.row UpperCAmelCase__ : List[Any] = Matrix(self.row , another.column ) for r in range(self.row ): for c in range(another.column ): for i in range(self.column ): result[r, c] += self[r, i] * another[i, c] return result else: UpperCAmelCase__ : List[str] = f'Unsupported type given for another ({type(__UpperCamelCase )})' raise TypeError(__UpperCamelCase ) def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = Matrix(self.column , self.row ) for r in 
range(self.row ): for c in range(self.column ): UpperCAmelCase__ : Optional[int] = self[r, c] return result def __a ( self : List[Any] , snake_case__ : Matrix , snake_case__ : Matrix ): '''simple docstring''' assert isinstance(__UpperCamelCase , __UpperCamelCase ) and isinstance(__UpperCamelCase , __UpperCamelCase ) assert self.row == self.column == u.row == v.row # u, v should be column vector assert u.column == v.column == 1 # u, v should be column vector # Calculate UpperCAmelCase__ : List[Any] = v.transpose() UpperCAmelCase__ : int = (v_t * self * u)[0, 0] + 1 if numerator_factor == 0: return None # It's not invertable return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor)) # Testing if __name__ == "__main__": def SCREAMING_SNAKE_CASE__ ( )-> None: '''simple docstring''' UpperCAmelCase__ : List[Any] = Matrix(3 , 3 , 0 ) for i in range(3 ): UpperCAmelCase__ : Any = 1 print(f'a^(-1) is {ainv}' ) # u, v UpperCAmelCase__ : Dict = Matrix(3 , 1 , 0 ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = 1, 2, -3 UpperCAmelCase__ : Optional[int] = Matrix(3 , 1 , 0 ) UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = 4, -2, 5 print(f'u is {u}' ) print(f'v is {v}' ) print(f'uv^T is {u * v.transpose()}' ) # Sherman Morrison print(f'(a + uv^T)^(-1) is {ainv.sherman_morrison(a__ , a__ )}' ) def SCREAMING_SNAKE_CASE__ ( )-> None: '''simple docstring''' import doctest doctest.testmod() testa()
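sherman_morrison applies the identity (A + uv^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u), where self plays the role of A^(-1). A short sanity check, assuming the Matrix class above: multiplying the result by (A + uv^T) should give approximately the identity.

a = Matrix(3, 3, 0)
for i in range(3):
    a[i, i] = 1  # A = I, so A^(-1) = I, as in test1()

u = Matrix(3, 1, 0)
u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
v = Matrix(3, 1, 0)
v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5

inv = a.sherman_morrison(u, v)
print(inv * (a + u * v.transpose()))  # expect the 3x3 identity, up to rounding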
360
"""simple docstring""" import os import re import sys import traceback import warnings from pathlib import Path from typing import Dict, Optional, Union from uuid import uuida from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami from huggingface_hub.file_download import REGEX_COMMIT_HASH from huggingface_hub.utils import ( EntryNotFoundError, RepositoryNotFoundError, RevisionNotFoundError, is_jinja_available, ) from packaging import version from requests import HTTPError from .. import __version__ from .constants import ( DEPRECATED_REVISION_ARGS, DIFFUSERS_CACHE, HUGGINGFACE_CO_RESOLVE_ENDPOINT, SAFETENSORS_WEIGHTS_NAME, WEIGHTS_NAME, ) from .import_utils import ( ENV_VARS_TRUE_VALUES, _flax_version, _jax_version, _onnxruntime_version, _torch_version, is_flax_available, is_onnx_available, is_torch_available, ) from .logging import get_logger _lowerCAmelCase : Optional[int] = get_logger(__name__) _lowerCAmelCase : Any = Path(__file__).parent / """model_card_template.md""" _lowerCAmelCase : Dict = uuida().hex _lowerCAmelCase : Optional[int] = os.getenv("""HF_HUB_OFFLINE""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : Optional[int] = os.getenv("""DISABLE_TELEMETRY""", """""").upper() in ENV_VARS_TRUE_VALUES _lowerCAmelCase : int = HUGGINGFACE_CO_RESOLVE_ENDPOINT + """/api/telemetry/""" def SCREAMING_SNAKE_CASE__ ( snake_case : Union[Dict, str, None] = None )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = f'diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}' if DISABLE_TELEMETRY or HF_HUB_OFFLINE: return ua + "; telemetry/off" if is_torch_available(): ua += f'; torch/{_torch_version}' if is_flax_available(): ua += f'; jax/{_jax_version}' ua += f'; flax/{_flax_version}' if is_onnx_available(): ua += f'; onnxruntime/{_onnxruntime_version}' # CI will set this value to True if os.environ.get("DIFFUSERS_IS_CI" , "" ).upper() in ENV_VARS_TRUE_VALUES: ua += "; is_ci/true" if isinstance(snake_case , snake_case ): ua += "; " + "; ".join(f'{k}/{v}' for k, v in user_agent.items() ) elif isinstance(snake_case , snake_case ): ua += "; " + user_agent return ua def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> List[str]: '''simple docstring''' if token is None: UpperCAmelCase__ : Optional[Any] = HfFolder.get_token() if organization is None: UpperCAmelCase__ : Tuple = whoami(snake_case )["name"] return f'{username}/{model_id}' else: return f'{organization}/{model_id}' def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : List[Any] )-> List[Any]: '''simple docstring''' if not is_jinja_available(): raise ValueError( "Modelcard rendering is based on Jinja templates." " Please make sure to have `jinja` installed before using `create_model_card`." " To install it, please run `pip install Jinja2`." 
) if hasattr(snake_case , "local_rank" ) and args.local_rank not in [-1, 0]: return UpperCAmelCase__ : int = args.hub_token if hasattr(snake_case , "hub_token" ) else None UpperCAmelCase__ : Optional[Any] = get_full_repo_name(snake_case , token=snake_case ) UpperCAmelCase__ : Tuple = ModelCard.from_template( card_data=ModelCardData( # Card metadata object that will be converted to YAML block language="en" , license="apache-2.0" , library_name="diffusers" , tags=[] , datasets=args.dataset_name , metrics=[] , ) , template_path=snake_case , model_name=snake_case , repo_name=snake_case , dataset_name=args.dataset_name if hasattr(snake_case , "dataset_name" ) else None , learning_rate=args.learning_rate , train_batch_size=args.train_batch_size , eval_batch_size=args.eval_batch_size , gradient_accumulation_steps=( args.gradient_accumulation_steps if hasattr(snake_case , "gradient_accumulation_steps" ) else None ) , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta1" ) else None , adam_betaa=args.adam_betaa if hasattr(snake_case , "adam_beta2" ) else None , adam_weight_decay=args.adam_weight_decay if hasattr(snake_case , "adam_weight_decay" ) else None , adam_epsilon=args.adam_epsilon if hasattr(snake_case , "adam_epsilon" ) else None , lr_scheduler=args.lr_scheduler if hasattr(snake_case , "lr_scheduler" ) else None , lr_warmup_steps=args.lr_warmup_steps if hasattr(snake_case , "lr_warmup_steps" ) else None , ema_inv_gamma=args.ema_inv_gamma if hasattr(snake_case , "ema_inv_gamma" ) else None , ema_power=args.ema_power if hasattr(snake_case , "ema_power" ) else None , ema_max_decay=args.ema_max_decay if hasattr(snake_case , "ema_max_decay" ) else None , mixed_precision=args.mixed_precision , ) UpperCAmelCase__ : List[str] = os.path.join(args.output_dir , "README.md" ) model_card.save(snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] , snake_case : Optional[str] = None )-> Tuple: '''simple docstring''' if resolved_file is None or commit_hash is not None: return commit_hash UpperCAmelCase__ : Dict = str(Path(snake_case ).as_posix() ) UpperCAmelCase__ : Optional[int] = re.search(r"snapshots/([^/]+)/" , snake_case ) if search is None: return None UpperCAmelCase__ : Dict = search.groups()[0] return commit_hash if REGEX_COMMIT_HASH.match(snake_case ) else None # Old default cache path, potentially to be migrated. # This logic was more or less taken from `transformers`, with the following differences: # - Diffusers doesn't use custom environment variables to specify the cache path. # - There is no need to migrate the cache format, just move the files to the new location. 
_lowerCAmelCase : Dict = os.path.expanduser( os.getenv("""HF_HOME""", os.path.join(os.getenv("""XDG_CACHE_HOME""", """~/.cache"""), """huggingface""")) ) _lowerCAmelCase : List[Any] = os.path.join(hf_cache_home, """diffusers""") def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[str] = None , snake_case : Optional[str] = None )-> None: '''simple docstring''' if new_cache_dir is None: UpperCAmelCase__ : Union[str, Any] = DIFFUSERS_CACHE if old_cache_dir is None: UpperCAmelCase__ : str = old_diffusers_cache UpperCAmelCase__ : List[str] = Path(snake_case ).expanduser() UpperCAmelCase__ : Any = Path(snake_case ).expanduser() for old_blob_path in old_cache_dir.glob("**/blobs/*" ): if old_blob_path.is_file() and not old_blob_path.is_symlink(): UpperCAmelCase__ : Dict = new_cache_dir / old_blob_path.relative_to(snake_case ) new_blob_path.parent.mkdir(parents=snake_case , exist_ok=snake_case ) os.replace(snake_case , snake_case ) try: os.symlink(snake_case , snake_case ) except OSError: logger.warning( "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded." ) # At this point, old_cache_dir contains symlinks to the new cache (it can still be used). _lowerCAmelCase : Tuple = os.path.join(DIFFUSERS_CACHE, """version_diffusers_cache.txt""") if not os.path.isfile(cache_version_file): _lowerCAmelCase : Any = 0 else: with open(cache_version_file) as f: try: _lowerCAmelCase : List[str] = int(f.read()) except ValueError: _lowerCAmelCase : Optional[int] = 0 if cache_version < 1: _lowerCAmelCase : List[str] = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0 if old_cache_is_not_empty: logger.warning( """The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your """ """existing cached models. This is a one-time operation, you can interrupt it or run it """ """later by calling `diffusers.utils.hub_utils.move_cache()`.""" ) try: move_cache() except Exception as e: _lowerCAmelCase : Dict = """\n""".join(traceback.format_tb(e.__traceback__)) logger.error( F"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """ """file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole """ """message and we will do our best to help.""" ) if cache_version < 1: try: os.makedirs(DIFFUSERS_CACHE, exist_ok=True) with open(cache_version_file, """w""") as f: f.write("""1""") except Exception: logger.warning( F"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """ """the directory exists and can be written to.""" ) def SCREAMING_SNAKE_CASE__ ( snake_case : str , snake_case : Optional[str] = None )-> str: '''simple docstring''' if variant is not None: UpperCAmelCase__ : int = weights_name.split("." 
) UpperCAmelCase__ : Optional[Any] = splits[:-1] + [variant] + splits[-1:] UpperCAmelCase__ : Optional[int] = ".".join(snake_case ) return weights_name def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , *, snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : str , snake_case : List[str] , snake_case : Dict , snake_case : Any , snake_case : Any , snake_case : Tuple , snake_case : List[str] , snake_case : Any , snake_case : Optional[int]=None , )-> Tuple: '''simple docstring''' UpperCAmelCase__ : List[str] = str(snake_case ) if os.path.isfile(snake_case ): return pretrained_model_name_or_path elif os.path.isdir(snake_case ): if os.path.isfile(os.path.join(snake_case , snake_case ) ): # Load from a PyTorch checkpoint UpperCAmelCase__ : Any = os.path.join(snake_case , snake_case ) return model_file elif subfolder is not None and os.path.isfile( os.path.join(snake_case , snake_case , snake_case ) ): UpperCAmelCase__ : str = os.path.join(snake_case , snake_case , snake_case ) return model_file else: raise EnvironmentError( f'Error no file named {weights_name} found in directory {pretrained_model_name_or_path}.' ) else: # 1. First check if deprecated way of loading from branches is used if ( revision in DEPRECATED_REVISION_ARGS and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME) and version.parse(version.parse(snake_case ).base_version ) >= version.parse("0.20.0" ) ): try: UpperCAmelCase__ : List[Any] = hf_hub_download( snake_case , filename=_add_variant(snake_case , snake_case ) , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) warnings.warn( f'Loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'` is deprecated. Loading instead from `revision=\'main\'` with `variant={revision}`. Loading model variants via `revision=\'{revision}\'` will be removed in diffusers v1. Please use `variant=\'{revision}\'` instead.' , snake_case , ) return model_file except: # noqa: E722 warnings.warn( f'You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision=\'{revision}\'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant=\'{revision}\'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(snake_case , snake_case )} file in the \'main\' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title \'{pretrained_model_name_or_path} is missing {_add_variant(snake_case , snake_case )}\' so that the correct variant file can be added.' , snake_case , ) try: # 2. 
Load model file as usual UpperCAmelCase__ : Dict = hf_hub_download( snake_case , filename=snake_case , cache_dir=snake_case , force_download=snake_case , proxies=snake_case , resume_download=snake_case , local_files_only=snake_case , use_auth_token=snake_case , user_agent=snake_case , subfolder=snake_case , revision=revision or commit_hash , ) return model_file except RepositoryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier ' "listed on 'https://huggingface.co/models'\nIf this is a private repository, make sure to pass a " "token having permission to this repo with `use_auth_token` or log in with `huggingface-cli " "login`." ) except RevisionNotFoundError: raise EnvironmentError( f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for ' "this model name. Check the model page at " f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' ) except EntryNotFoundError: raise EnvironmentError( f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' ) except HTTPError as err: raise EnvironmentError( f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' ) except ValueError: raise EnvironmentError( f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it' f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a' f' directory containing a file named {weights_name} or' " \nCheckout your internet connection or see how to run the library in" " offline mode at 'https://huggingface.co/docs/diffusers/installation#offline-mode'." ) except EnvironmentError: raise EnvironmentError( f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from ' "'https://huggingface.co/models', make sure you don't have a local directory with the same name. " f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory ' f'containing a file named {weights_name}' )
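For intuition, the _add_variant helper near the top of this file rebuilds the weights filename around the variant; an illustrative trace with assumed values:

# _add_variant("diffusion_pytorch_model.bin", "fp16")
#   splits      -> ["diffusion_pytorch_model", "bin"]
#   reassembled -> ["diffusion_pytorch_model", "fp16", "bin"]
#   returns     -> "diffusion_pytorch_model.fp16.bin"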
298
0
"""simple docstring""" import argparse import os import torch from transformers import FlavaConfig, FlavaForPreTraining from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] )-> Optional[int]: '''simple docstring''' return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items() ) def SCREAMING_SNAKE_CASE__ ( snake_case : Dict , snake_case : Union[str, Any] )-> Tuple: '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = {} for key, value in state_dict.items(): if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key: continue UpperCAmelCase__ : str = key.replace("heads.cmd.mim_head.cls.predictions" , "mmm_image_head" ) UpperCAmelCase__ : List[str] = key.replace("heads.cmd.mlm_head.cls.predictions" , "mmm_text_head" ) UpperCAmelCase__ : List[str] = key.replace("heads.cmd.itm_head.cls" , "itm_head" ) UpperCAmelCase__ : List[str] = key.replace("heads.cmd.itm_head.pooler" , "itm_head.pooler" ) UpperCAmelCase__ : Dict = key.replace("heads.cmd.clip_head.logit_scale" , "flava.logit_scale" ) UpperCAmelCase__ : Optional[int] = key.replace("heads.fairseq_mlm.cls.predictions" , "mlm_head" ) UpperCAmelCase__ : Union[str, Any] = key.replace("heads.imagenet.mim_head.cls.predictions" , "mim_head" ) UpperCAmelCase__ : Optional[int] = key.replace("mm_text_projection" , "flava.text_to_mm_projection" ) UpperCAmelCase__ : str = key.replace("mm_image_projection" , "flava.image_to_mm_projection" ) UpperCAmelCase__ : Dict = key.replace("image_encoder.module" , "flava.image_model" ) UpperCAmelCase__ : Optional[int] = key.replace("text_encoder.module" , "flava.text_model" ) UpperCAmelCase__ : int = key.replace("mm_encoder.module.encoder.cls_token" , "flava.multimodal_model.cls_token" ) UpperCAmelCase__ : Optional[Any] = key.replace("mm_encoder.module" , "flava.multimodal_model" ) UpperCAmelCase__ : Tuple = key.replace("text_projection" , "flava.text_projection" ) UpperCAmelCase__ : Union[str, Any] = key.replace("image_projection" , "flava.image_projection" ) UpperCAmelCase__ : Any = value.float() for key, value in codebook_state_dict.items(): UpperCAmelCase__ : Any = value return upgrade @torch.no_grad() def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : int , snake_case : str , snake_case : Optional[Any]=None )-> Optional[int]: '''simple docstring''' if config_path is not None: UpperCAmelCase__ : str = FlavaConfig.from_pretrained(snake_case ) else: UpperCAmelCase__ : int = FlavaConfig() UpperCAmelCase__ : str = FlavaForPreTraining(snake_case ).eval() UpperCAmelCase__ : int = convert_dalle_checkpoint(snake_case , snake_case , save_checkpoint=snake_case ) if os.path.exists(snake_case ): UpperCAmelCase__ : Dict = torch.load(snake_case , map_location="cpu" ) else: UpperCAmelCase__ : Any = torch.hub.load_state_dict_from_url(snake_case , map_location="cpu" ) UpperCAmelCase__ : Tuple = upgrade_state_dict(snake_case , snake_case ) hf_model.load_state_dict(snake_case ) UpperCAmelCase__ : str = hf_model.state_dict() UpperCAmelCase__ : List[Any] = count_parameters(snake_case ) UpperCAmelCase__ : Optional[Any] = count_parameters(snake_case ) + count_parameters(snake_case ) assert torch.allclose(snake_case , snake_case , atol=1E-3 ) hf_model.save_pretrained(snake_case ) if __name__ == "__main__": _lowerCAmelCase : List[Any] = argparse.ArgumentParser() parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path 
to the output PyTorch model.""") parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""") parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""") parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""") _lowerCAmelCase : int = parser.parse_args() convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
361
"""simple docstring""" from __future__ import annotations import unittest from transformers import is_tf_available from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow if is_tf_available(): import tensorflow as tf from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM @require_tf @require_sentencepiece @require_tokenizers class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" ) UpperCAmelCase__ : int = AutoTokenizer.from_pretrained("google/mt5-small" ) UpperCAmelCase__ : Dict = tokenizer("Hello there" , return_tensors="tf" ).input_ids UpperCAmelCase__ : Union[str, Any] = tokenizer("Hi I am" , return_tensors="tf" ).input_ids UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ).loss UpperCAmelCase__ : Optional[Any] = -tf.math.reduce_mean(snake_case__ ).numpy() UpperCAmelCase__ : List[Any] = -21.22_8168 self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2e-4 )
298
0
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> set: '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = set() # edges = list of graph's edges UpperCAmelCase__ : List[str] = get_edges(__lowerCAmelCase ) # While there are still elements in edges list, take an arbitrary edge # (from_node, to_node) and add his extremity to chosen_vertices and then # remove all arcs adjacent to the from_node and to_node while edges: UpperCAmelCase__ : List[str] = edges.pop() chosen_vertices.add(__lowerCAmelCase ) chosen_vertices.add(__lowerCAmelCase ) for edge in edges.copy(): if from_node in edge or to_node in edge: edges.discard(__lowerCAmelCase ) return chosen_vertices def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> set: '''simple docstring''' UpperCAmelCase__ : Tuple = set() for from_node, to_nodes in graph.items(): for to_node in to_nodes: edges.add((from_node, to_node) ) return edges if __name__ == "__main__": import doctest doctest.testmod() # graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]} # print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
362
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class lowerCAmelCase__ : def __init__( self : Optional[Any] , snake_case__ : Optional[Any] , snake_case__ : Dict=1_3 , snake_case__ : List[str]=7 , snake_case__ : Union[str, Any]=True , snake_case__ : Tuple=True , snake_case__ : Optional[int]=True , snake_case__ : Any=True , snake_case__ : Any=9_9 , snake_case__ : List[Any]=1_6 , snake_case__ : Any=3_6 , snake_case__ : Union[str, Any]=6 , snake_case__ : Tuple=6 , snake_case__ : List[str]=6 , snake_case__ : List[str]=3_7 , snake_case__ : Dict="gelu" , snake_case__ : int=0.1 , snake_case__ : List[Any]=0.1 , snake_case__ : List[str]=5_1_2 , snake_case__ : Dict=1_6 , snake_case__ : str=2 , snake_case__ : Optional[Any]=0.02 , snake_case__ : List[str]=3 , snake_case__ : Any=4 , snake_case__ : int=None , ): '''simple docstring''' UpperCAmelCase__ : Tuple = parent UpperCAmelCase__ : int = batch_size UpperCAmelCase__ : int = seq_length UpperCAmelCase__ : List[str] = is_training UpperCAmelCase__ : Union[str, Any] = use_input_mask UpperCAmelCase__ : Optional[Any] = use_token_type_ids UpperCAmelCase__ : Any = use_labels UpperCAmelCase__ : List[Any] = vocab_size UpperCAmelCase__ : Any = embedding_size UpperCAmelCase__ : List[str] = hidden_size UpperCAmelCase__ : List[Any] = num_hidden_layers UpperCAmelCase__ : int = num_hidden_groups UpperCAmelCase__ : Union[str, Any] = num_attention_heads UpperCAmelCase__ : List[str] = intermediate_size UpperCAmelCase__ : Optional[Any] = hidden_act UpperCAmelCase__ : List[Any] = hidden_dropout_prob UpperCAmelCase__ : Tuple = attention_probs_dropout_prob UpperCAmelCase__ : str = max_position_embeddings UpperCAmelCase__ : Any = type_vocab_size UpperCAmelCase__ : Union[str, Any] = type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = initializer_range UpperCAmelCase__ : Tuple = num_labels UpperCAmelCase__ : List[str] = num_choices UpperCAmelCase__ : Union[str, Any] = scope def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size ) UpperCAmelCase__ : Optional[int] = None if self.use_input_mask: UpperCAmelCase__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ) UpperCAmelCase__ : Optional[int] = None if self.use_token_type_ids: UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size ) UpperCAmelCase__ : List[Any] = None UpperCAmelCase__ : Optional[int] = None UpperCAmelCase__ : Any = None if self.use_labels: UpperCAmelCase__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels ) UpperCAmelCase__ : Dict = ids_tensor([self.batch_size] , self.num_choices ) UpperCAmelCase__ : int = 
self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def __a ( self : Any ): '''simple docstring''' return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def __a ( self : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : List[Any] , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : str = AlbertModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ ) UpperCAmelCase__ : Optional[Any] = model(snake_case__ , token_type_ids=snake_case__ ) UpperCAmelCase__ : Optional[int] = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) ) def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Union[str, Any] , snake_case__ : List[str] , snake_case__ : int , snake_case__ : List[Any] , snake_case__ : str , snake_case__ : int ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertForPreTraining(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , sentence_order_label=snake_case__ , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) ) def __a ( self : Union[str, Any] , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : List[Any] , snake_case__ : Optional[Any] , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = AlbertForMaskedLM(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) ) def __a ( self : Optional[int] , snake_case__ : Optional[int] , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertForQuestionAnswering(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[str] = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , start_positions=snake_case__ , end_positions=snake_case__ , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) ) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) ) def __a ( self 
: Dict , snake_case__ : Optional[Any] , snake_case__ : List[str] , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.num_labels UpperCAmelCase__ : int = AlbertForSequenceClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) ) def __a ( self : str , snake_case__ : Optional[Any] , snake_case__ : str , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : Tuple ): '''simple docstring''' UpperCAmelCase__ : str = self.num_labels UpperCAmelCase__ : Any = AlbertForTokenClassification(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[str] = model(snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) ) def __a ( self : Any , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Dict , snake_case__ : int , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.num_choices UpperCAmelCase__ : Optional[Any] = AlbertForMultipleChoice(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous() UpperCAmelCase__ : Tuple = model( snake_case__ , attention_mask=snake_case__ , token_type_ids=snake_case__ , labels=snake_case__ , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() ( ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ( UpperCAmelCase__ ) , ) : Optional[Any] = config_and_inputs UpperCAmelCase__ : Optional[int] = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( { '''feature-extraction''': AlbertModel, '''fill-mask''': AlbertForMaskedLM, '''question-answering''': AlbertForQuestionAnswering, '''text-classification''': AlbertForSequenceClassification, '''token-classification''': AlbertForTokenClassification, '''zero-shot''': AlbertForSequenceClassification, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =True def __a ( self : Tuple , snake_case__ : int , snake_case__ : Tuple , snake_case__ : Optional[int]=False ): '''simple docstring''' UpperCAmelCase__ : List[str] = super()._prepare_for_class(snake_case__ , 
snake_case__ , return_labels=snake_case__ ) if return_labels: if model_class in get_values(snake_case__ ): UpperCAmelCase__ : List[Any] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case__ ) UpperCAmelCase__ : int = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=snake_case__ ) return inputs_dict def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Dict = AlbertModelTester(self ) UpperCAmelCase__ : Any = ConfigTester(self , config_class=snake_case__ , hidden_size=3_7 ) def __a ( self : Dict ): '''simple docstring''' self.config_tester.run_common_tests() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*snake_case__ ) def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: UpperCAmelCase__ : Dict = type self.model_tester.create_and_check_model(*snake_case__ ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) @require_torch class lowerCAmelCase__ ( unittest.TestCase ): @slow def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[str] = AlbertModel.from_pretrained("albert-base-v2" ) UpperCAmelCase__ : Dict = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] ) UpperCAmelCase__ : List[str] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] ) with torch.no_grad(): UpperCAmelCase__ : int = model(snake_case__ , attention_mask=snake_case__ )[0] UpperCAmelCase__ : Dict = torch.Size((1, 1_1, 7_6_8) ) self.assertEqual(output.shape , snake_case__ ) UpperCAmelCase__ : Union[str, Any] = torch.tensor( [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] ) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case__ , atol=1e-4 ) )
298
0
"""simple docstring""" from typing import Optional from torch import nn from .transformer_ad import TransformeraDModel, TransformeraDModelOutput class lowerCAmelCase__ ( nn.Module ): def __init__( self : Any , snake_case__ : int = 1_6 , snake_case__ : int = 8_8 , snake_case__ : Optional[int] = None , snake_case__ : int = 1 , snake_case__ : float = 0.0 , snake_case__ : int = 3_2 , snake_case__ : Optional[int] = None , snake_case__ : bool = False , snake_case__ : Optional[int] = None , snake_case__ : Optional[int] = None , snake_case__ : str = "geglu" , snake_case__ : Optional[int] = None , ): '''simple docstring''' super().__init__() UpperCAmelCase__ : List[str] = nn.ModuleList( [ TransformeraDModel( num_attention_heads=__SCREAMING_SNAKE_CASE , attention_head_dim=__SCREAMING_SNAKE_CASE , in_channels=__SCREAMING_SNAKE_CASE , num_layers=__SCREAMING_SNAKE_CASE , dropout=__SCREAMING_SNAKE_CASE , norm_num_groups=__SCREAMING_SNAKE_CASE , cross_attention_dim=__SCREAMING_SNAKE_CASE , attention_bias=__SCREAMING_SNAKE_CASE , sample_size=__SCREAMING_SNAKE_CASE , num_vector_embeds=__SCREAMING_SNAKE_CASE , activation_fn=__SCREAMING_SNAKE_CASE , num_embeds_ada_norm=__SCREAMING_SNAKE_CASE , ) for _ in range(2 ) ] ) # Variables that can be set by a pipeline: # The ratio of transformer1 to transformer2's output states to be combined during inference UpperCAmelCase__ : List[Any] = 0.5 # The shape of `encoder_hidden_states` is expected to be # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)` UpperCAmelCase__ : Dict = [7_7, 2_5_7] # Which transformer to use to encode which condition. # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])` UpperCAmelCase__ : Optional[int] = [1, 0] def __a ( self : int , snake_case__ : List[str] , snake_case__ : int , snake_case__ : Union[str, Any]=None , snake_case__ : Any=None , snake_case__ : Optional[Any]=None , snake_case__ : bool = True , ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = hidden_states UpperCAmelCase__ : List[str] = [] UpperCAmelCase__ : List[Any] = 0 # attention_mask is not used yet for i in range(2 ): # for each of the two transformers, pass the corresponding condition tokens UpperCAmelCase__ : List[str] = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]] UpperCAmelCase__ : Any = self.transformer_index_for_condition[i] UpperCAmelCase__ : Tuple = self.transformers[transformer_index]( __SCREAMING_SNAKE_CASE , encoder_hidden_states=__SCREAMING_SNAKE_CASE , timestep=__SCREAMING_SNAKE_CASE , cross_attention_kwargs=__SCREAMING_SNAKE_CASE , return_dict=__SCREAMING_SNAKE_CASE , )[0] encoded_states.append(encoded_state - input_states ) tokens_start += self.condition_lengths[i] UpperCAmelCase__ : List[Any] = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio) UpperCAmelCase__ : Tuple = output_states + input_states if not return_dict: return (output_states,) return TransformeraDModelOutput(sample=__SCREAMING_SNAKE_CASE )
363
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : Any )-> Any: '''simple docstring''' UpperCAmelCase__ : List[str] = [1] for i in range(2 , snake_case ): factorials.append(factorials[-1] * i ) assert 0 <= k < factorials[-1] * n, "k out of bounds" UpperCAmelCase__ : Union[str, Any] = [] UpperCAmelCase__ : str = list(range(snake_case ) ) # Find permutation while factorials: UpperCAmelCase__ : str = factorials.pop() UpperCAmelCase__ , UpperCAmelCase__ : int = divmod(snake_case , snake_case ) permutation.append(elements[number] ) elements.remove(elements[number] ) permutation.append(elements[0] ) return permutation if __name__ == "__main__": import doctest doctest.testmod()
298
0
"""simple docstring""" from __future__ import annotations _lowerCAmelCase : List[str] = 10 def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> list[int]: '''simple docstring''' UpperCAmelCase__ : Optional[int] = 1 UpperCAmelCase__ : Optional[int] = max(__snake_case ) while placement <= max_digit: # declare and initialize empty buckets UpperCAmelCase__ : list[list] = [[] for _ in range(__snake_case )] # split list_of_ints between the buckets for i in list_of_ints: UpperCAmelCase__ : int = int((i / placement) % RADIX ) buckets[tmp].append(__snake_case ) # put each buckets' contents into list_of_ints UpperCAmelCase__ : str = 0 for b in range(__snake_case ): for i in buckets[b]: UpperCAmelCase__ : str = i a += 1 # move to next placement *= RADIX return list_of_ints if __name__ == "__main__": import doctest doctest.testmod()
364
"""simple docstring""" import unittest import numpy as np import requests from transformers.testing_utils import require_torch, require_vision from transformers.utils import is_torch_available, is_vision_available from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs if is_torch_available(): import torch from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11 else: _lowerCAmelCase : Union[str, Any] = False if is_vision_available(): from PIL import Image from transformers import PixaStructImageProcessor class lowerCAmelCase__ ( unittest.TestCase ): def __init__( self : Dict , snake_case__ : Optional[int] , snake_case__ : List[str]=7 , snake_case__ : int=3 , snake_case__ : Any=1_8 , snake_case__ : List[Any]=3_0 , snake_case__ : int=4_0_0 , snake_case__ : Dict=None , snake_case__ : Optional[Any]=True , snake_case__ : List[str]=True , snake_case__ : Optional[Any]=None , ): '''simple docstring''' UpperCAmelCase__ : Dict = size if size is not None else {"height": 2_0, "width": 2_0} UpperCAmelCase__ : List[str] = parent UpperCAmelCase__ : List[str] = batch_size UpperCAmelCase__ : Optional[Any] = num_channels UpperCAmelCase__ : Any = image_size UpperCAmelCase__ : int = min_resolution UpperCAmelCase__ : Tuple = max_resolution UpperCAmelCase__ : Optional[int] = size UpperCAmelCase__ : Optional[int] = do_normalize UpperCAmelCase__ : str = do_convert_rgb UpperCAmelCase__ : Dict = [5_1_2, 1_0_2_4, 2_0_4_8, 4_0_9_6] UpperCAmelCase__ : Union[str, Any] = patch_size if patch_size is not None else {"height": 1_6, "width": 1_6} def __a ( self : str ): '''simple docstring''' return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb} def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg" UpperCAmelCase__ : List[str] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw ).convert("RGB" ) return raw_image @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : int = PixaStructImageProcessingTester(self ) @property def __a ( self : Optional[int] ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , "do_normalize" ) ) self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) ) def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : List[str] = self.image_processor_tester.prepare_dummy_image() UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict ) UpperCAmelCase__ : Dict = 2_0_4_8 UpperCAmelCase__ : int = image_processor(snake_case__ , return_tensors="pt" , max_patches=snake_case__ ) self.assertTrue(torch.allclose(inputs.flattened_patches.mean() , torch.tensor(0.0606 ) , atol=1e-3 , rtol=1e-3 ) ) def __a ( self : List[Any] ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : 
Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : List[Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : str = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : List[Any] ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 UpperCAmelCase__ : Optional[int] = True for max_patch in self.image_processor_tester.max_patches: # Test not batched input with self.assertRaises(snake_case__ ): UpperCAmelCase__ : List[Any] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches UpperCAmelCase__ : Optional[Any] = "Hello" UpperCAmelCase__ : int = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : Dict = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ , header_text=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : Dict ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : Any = self.image_processing_class(**self.image_processor_dict ) # create random numpy tensors UpperCAmelCase__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , numpify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , np.ndarray ) UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : Dict = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : List[str] = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) def __a ( self : Optional[int] ): 
'''simple docstring''' # Initialize image_processor UpperCAmelCase__ : int = self.image_processing_class(**self.image_processor_dict ) # create random PyTorch tensors UpperCAmelCase__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ , torchify=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , torch.Tensor ) # Test not batched input UpperCAmelCase__ : int = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * self.image_processor_tester.num_channels ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : int = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : str = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , ) @unittest.skipIf( not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , ) @require_torch @require_vision class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =PixaStructImageProcessor if is_vision_available() else None def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = PixaStructImageProcessingTester(self , num_channels=4 ) UpperCAmelCase__ : Optional[int] = 3 @property def __a ( self : int ): '''simple docstring''' return self.image_processor_tester.prepare_image_processor_dict() def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict ) self.assertTrue(hasattr(snake_case__ , "do_normalize" ) ) self.assertTrue(hasattr(snake_case__ , "do_convert_rgb" ) ) def __a ( self : int ): '''simple docstring''' # Initialize image_processor UpperCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict ) # create random PIL images UpperCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case__ ) for image in image_inputs: self.assertIsInstance(snake_case__ , Image.Image ) # Test not batched input UpperCAmelCase__ : str = ( (self.image_processor_tester.patch_size["height"] * self.image_processor_tester.patch_size["width"]) * (self.image_processor_tester.num_channels - 1) ) + 2 for max_patch in self.image_processor_tester.max_patches: # Test not batched input UpperCAmelCase__ : Optional[int] = image_processor( image_inputs[0] , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (1, max_patch, expected_hidden_dim) , ) # Test batched UpperCAmelCase__ : Dict = image_processor( snake_case__ , return_tensors="pt" , max_patches=snake_case__ ).flattened_patches self.assertEqual( encoded_images.shape , (self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) , )
298
0
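The image-processor tests above all assert one invariant: each flattened patch carries patch_height x patch_width x num_channels pixel values plus two extra slots for the row/column position ids, so the last tensor dimension is that product plus 2. A minimal sketch of the arithmetic, with the values below mirroring the tester defaults:

# Sketch only: recomputes the expected last dimension of the flattened
# patches checked above. `patch_size` and `num_channels` mirror the tester.
patch_size = {"height": 16, "width": 16}
num_channels = 3

# pixel values per patch, plus 2 slots for the (row, col) position ids
expected_hidden_dim = patch_size["height"] * patch_size["width"] * num_channels + 2
assert expected_hidden_dim == 16 * 16 * 3 + 2  # 770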
"""simple docstring""" from manim import * class lowerCAmelCase__ ( __snake_case ): def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : int = Rectangle(height=0.5 , width=0.5 ) UpperCAmelCase__ : Optional[int] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 ) UpperCAmelCase__ : List[str] = [mem.copy() for i in range(6 )] UpperCAmelCase__ : Tuple = [mem.copy() for i in range(6 )] UpperCAmelCase__ : int = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) UpperCAmelCase__ : str = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) UpperCAmelCase__ : int = VGroup(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) UpperCAmelCase__ : str = Text("CPU" , font_size=2_4 ) UpperCAmelCase__ : str = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) cpu.move_to([-2.5, -0.5, 0] ) self.add(lowerCamelCase_ ) UpperCAmelCase__ : Any = [mem.copy() for i in range(1 )] UpperCAmelCase__ : List[Any] = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) UpperCAmelCase__ : Optional[Any] = Text("GPU" , font_size=2_4 ) UpperCAmelCase__ : Dict = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) gpu.align_to(lowerCamelCase_ , lowerCamelCase_ ) gpu.set_x(gpu.get_x() - 1 ) self.add(lowerCamelCase_ ) UpperCAmelCase__ : Optional[Any] = [mem.copy() for i in range(6 )] UpperCAmelCase__ : Any = VGroup(*lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0 ) UpperCAmelCase__ : List[Any] = Text("Model" , font_size=2_4 ) UpperCAmelCase__ : Optional[int] = Group(lowerCamelCase_ , lowerCamelCase_ ).arrange(lowerCamelCase_ , buff=0.5 , aligned_edge=lowerCamelCase_ ) model.move_to([3, -1.0, 0] ) self.play( Create(lowerCamelCase_ , run_time=1 ) , Create(lowerCamelCase_ , run_time=1 ) , Create(lowerCamelCase_ , run_time=1 ) , ) UpperCAmelCase__ : Optional[int] = MarkupText( f'First, an empty model skeleton is loaded\ninto <span fgcolor=\'{YELLOW}\'>memory</span> without using much RAM.' , font_size=2_4 , ) UpperCAmelCase__ : Tuple = Square(side_length=2.2 ) key.move_to([-5, 2, 0] ) UpperCAmelCase__ : Union[str, Any] = MarkupText( f'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=1_8 , ) key_text.move_to([-5, 2.4, 0] ) step_a.move_to([2, 2, 0] ) self.play(Write(lowerCamelCase_ , run_time=2.5 ) , Write(lowerCamelCase_ ) , Write(lowerCamelCase_ ) ) self.add(lowerCamelCase_ ) UpperCAmelCase__ : List[Any] = [] UpperCAmelCase__ : Any = [] UpperCAmelCase__ : Optional[Any] = [] for i, rect in enumerate(lowerCamelCase_ ): UpperCAmelCase__ : Optional[int] = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0.0 ).set_fill(lowerCamelCase_ , opacity=0.7 ) cpu_target.move_to(lowerCamelCase_ ) cpu_target.generate_target() UpperCAmelCase__ : Any = 0.46 / 4 UpperCAmelCase__ : str = 0.46 / 3 if i == 0: cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=lowerCamelCase_ ) cpu_target.target.set_x(cpu_target.target.get_x() + 0.1 ) elif i == 3: cpu_target.target.next_to(cpu_targs[0].target , direction=lowerCamelCase_ , buff=0.0 ) else: cpu_target.target.next_to(cpu_targs[i - 1].target , direction=lowerCamelCase_ , buff=0.0 ) cpu_targs.append(lowerCamelCase_ ) first_animations.append(rect.animate(run_time=0.5 ).set_stroke(lowerCamelCase_ ) ) second_animations.append(MoveToTarget(lowerCamelCase_ , run_time=1.5 ) ) self.play(*lowerCamelCase_ ) self.play(*lowerCamelCase_ ) self.wait()
365
"""simple docstring""" import importlib import os import fsspec import pytest from fsspec import register_implementation from fsspec.registry import _registry as _fsspec_registry from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem from .utils import require_lza, require_zstandard def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] )-> Any: '''simple docstring''' assert "mock" in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE__ ( )-> List[Any]: '''simple docstring''' assert "mock" not in _fsspec_registry assert "bz2" in _fsspec_registry def SCREAMING_SNAKE_CASE__ ( )-> Optional[int]: '''simple docstring''' UpperCAmelCase__ : int = "mock-s3-bucket" UpperCAmelCase__ : Any = f's3://{mock_bucket}' UpperCAmelCase__ : Tuple = extract_path_from_uri(snake_case ) assert dataset_path.startswith("s3://" ) is False UpperCAmelCase__ : str = "./local/path" UpperCAmelCase__ : Union[str, Any] = extract_path_from_uri(snake_case ) assert dataset_path == new_dataset_path def SCREAMING_SNAKE_CASE__ ( snake_case : Any )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case ) assert is_remote is True UpperCAmelCase__ : str = fsspec.filesystem("file" ) UpperCAmelCase__ : Optional[Any] = is_remote_filesystem(snake_case ) assert is_remote is False @pytest.mark.parametrize("compression_fs_class" , snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] , snake_case : Any , snake_case : List[str] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : Optional[Any] , snake_case : int )-> int: '''simple docstring''' UpperCAmelCase__ : Optional[Any] = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bza_file, "lz4": lza_file} UpperCAmelCase__ : Dict = input_paths[compression_fs_class.protocol] if input_path is None: UpperCAmelCase__ : Optional[Any] = f'for \'{compression_fs_class.protocol}\' compression protocol, ' if compression_fs_class.protocol == "lz4": reason += require_lza.kwargs["reason"] elif compression_fs_class.protocol == "zstd": reason += require_zstandard.kwargs["reason"] pytest.skip(snake_case ) UpperCAmelCase__ : Optional[Any] = fsspec.filesystem(compression_fs_class.protocol , fo=snake_case ) assert isinstance(snake_case , snake_case ) UpperCAmelCase__ : Union[str, Any] = os.path.basename(snake_case ) UpperCAmelCase__ : Optional[int] = expected_filename[: expected_filename.rindex("." 
)] assert fs.glob("*" ) == [expected_filename] with fs.open(snake_case , "r" , encoding="utf-8" ) as f, open(snake_case , encoding="utf-8" ) as expected_file: assert f.read() == expected_file.read() @pytest.mark.parametrize("protocol" , ["zip", "gzip"] ) def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : Dict , snake_case : Tuple )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[str] = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path} UpperCAmelCase__ : int = compressed_file_paths[protocol] UpperCAmelCase__ : Any = "dataset.jsonl" UpperCAmelCase__ : Any = f'{protocol}://{member_file_path}::{compressed_file_path}' UpperCAmelCase__ , *UpperCAmelCase__ : Optional[int] = fsspec.get_fs_token_paths(snake_case ) assert fs.isfile(snake_case ) assert not fs.isfile("non_existing_" + member_file_path ) @pytest.mark.integration def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] , snake_case : Dict , snake_case : Dict , snake_case : Dict )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = hf_api.dataset_info(snake_case , token=snake_case ) UpperCAmelCase__ : str = HfFileSystem(repo_info=snake_case , token=snake_case ) assert sorted(hffs.glob("*" ) ) == [".gitattributes", "data"] assert hffs.isdir("data" ) assert hffs.isfile(".gitattributes" ) and hffs.isfile("data/text_data.txt" ) with open(snake_case ) as f: assert hffs.open("data/text_data.txt" , "r" ).read() == f.read() def SCREAMING_SNAKE_CASE__ ( )-> Union[str, Any]: '''simple docstring''' UpperCAmelCase__ : Tuple = "bz2" # Import module import datasets.filesystems # Overwrite protocol and reload register_implementation(snake_case , snake_case , clobber=snake_case ) with pytest.warns(snake_case ) as warning_info: importlib.reload(datasets.filesystems ) assert len(snake_case ) == 1 assert ( str(warning_info[0].message ) == f'A filesystem protocol was already set for {protocol} and will be overwritten.' )
298
0
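For reference, the compression tests above exercise fsspec's chained-URL convention, where `::` separates the member path from the archive it lives in. A small sketch of how that URL is assembled (the archive path is a placeholder, not taken from the tests):

# Sketch: the chained fsspec URL format tested above. fsspec resolves
# "zip://member::archive" by opening the archive first, then the member.
protocol = "zip"
member_file_path = "dataset.jsonl"
compressed_file_path = "/tmp/archive.zip"  # placeholder path

chained = f"{protocol}://{member_file_path}::{compressed_file_path}"
assert chained == "zip://dataset.jsonl::/tmp/archive.zip"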
"""simple docstring""" import sys import webbrowser import requests from bsa import BeautifulSoup from fake_useragent import UserAgent if __name__ == "__main__": print("""Googling.....""") _lowerCAmelCase : int = """https://www.google.com/search?q=""" + """ """.join(sys.argv[1:]) _lowerCAmelCase : str = requests.get(url, headers={"""UserAgent""": UserAgent().random}) # res.raise_for_status() with open("""project1a.html""", """wb""") as out_file: # only for knowing the class for data in res.iter_content(10_000): out_file.write(data) _lowerCAmelCase : Optional[Any] = BeautifulSoup(res.text, """html.parser""") _lowerCAmelCase : str = list(soup.select(""".eZt8xd"""))[:5] print(len(links)) for link in links: if link.text == "Maps": webbrowser.open(link.get("""href""")) else: webbrowser.open(F"""https://google.com{link.get("href")}""")
366
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : List[Any] = logging.get_logger(__name__) _lowerCAmelCase : List[Any] = { # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''megatron-bert''' def __init__( self : Optional[Any] , snake_case__ : Dict=2_9_0_5_6 , snake_case__ : Optional[int]=1_0_2_4 , snake_case__ : int=2_4 , snake_case__ : str=1_6 , snake_case__ : Optional[Any]=4_0_9_6 , snake_case__ : List[str]="gelu" , snake_case__ : List[str]=0.1 , snake_case__ : Optional[int]=0.1 , snake_case__ : Tuple=5_1_2 , snake_case__ : str=2 , snake_case__ : List[Any]=0.02 , snake_case__ : Any=1e-12 , snake_case__ : Any=0 , snake_case__ : str="absolute" , snake_case__ : Optional[Any]=True , **snake_case__ : int , ): '''simple docstring''' super().__init__(pad_token_id=snake_case__ , **snake_case__ ) UpperCAmelCase__ : str = vocab_size UpperCAmelCase__ : str = hidden_size UpperCAmelCase__ : List[str] = num_hidden_layers UpperCAmelCase__ : Optional[int] = num_attention_heads UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Tuple = hidden_dropout_prob UpperCAmelCase__ : List[Any] = attention_probs_dropout_prob UpperCAmelCase__ : Any = max_position_embeddings UpperCAmelCase__ : Dict = type_vocab_size UpperCAmelCase__ : Optional[int] = initializer_range UpperCAmelCase__ : int = layer_norm_eps UpperCAmelCase__ : Optional[Any] = position_embedding_type UpperCAmelCase__ : Any = use_cache
298
0
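The configuration class above only records hyperparameters and forwards pad_token_id to the base class. A hypothetical round-trip showing how such a PretrainedConfig subclass serializes (MyConfig and its two parameters are stand-in names, not from the code above):

# Hypothetical sketch of a PretrainedConfig subclass round-trip.
from transformers import PretrainedConfig

class MyConfig(PretrainedConfig):
    model_type = "megatron-bert"

    def __init__(self, vocab_size=29056, hidden_size=1024, **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size

config = MyConfig()
assert config.to_dict()["vocab_size"] == 29056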
"""simple docstring""" import tempfile import unittest import numpy as np from huggingface_hub import HfFolder, delete_repo from requests.exceptions import HTTPError from transformers import BertConfig, is_flax_available from transformers.testing_utils import TOKEN, USER, is_staging_test, require_flax if is_flax_available(): import os from flax.core.frozen_dict import unfreeze from flax.traverse_util import flatten_dict from transformers import FlaxBertModel _lowerCAmelCase : Optional[int] = """0.12""" # assumed parallelism: 8 @require_flax @is_staging_test class lowerCAmelCase__ ( unittest.TestCase ): @classmethod def __a ( cls : int ): '''simple docstring''' UpperCAmelCase__ : Any = TOKEN HfFolder.save_token(_UpperCAmelCase ) @classmethod def __a ( cls : Optional[Any] ): '''simple docstring''' try: delete_repo(token=cls._token , repo_id="test-model-flax" ) except HTTPError: pass try: delete_repo(token=cls._token , repo_id="valid_org/test-model-flax-org" ) except HTTPError: pass def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = BertConfig( vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 ) UpperCAmelCase__ : Dict = FlaxBertModel(_UpperCAmelCase ) model.push_to_hub("test-model-flax" , use_auth_token=self._token ) UpperCAmelCase__ : Any = FlaxBertModel.from_pretrained(f'{USER}/test-model-flax' ) UpperCAmelCase__ : Dict = flatten_dict(unfreeze(model.params ) ) UpperCAmelCase__ : Optional[int] = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase__ : Tuple = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_UpperCAmelCase , 1e-3 , msg=f'{key} not identical' ) # Reset repo delete_repo(token=self._token , repo_id="test-model-flax" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(_UpperCAmelCase , repo_id="test-model-flax" , push_to_hub=_UpperCAmelCase , use_auth_token=self._token ) UpperCAmelCase__ : str = FlaxBertModel.from_pretrained(f'{USER}/test-model-flax' ) UpperCAmelCase__ : str = flatten_dict(unfreeze(model.params ) ) UpperCAmelCase__ : Any = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase__ : Union[str, Any] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_UpperCAmelCase , 1e-3 , msg=f'{key} not identical' ) def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Tuple = BertConfig( vocab_size=9_9 , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 ) UpperCAmelCase__ : List[Any] = FlaxBertModel(_UpperCAmelCase ) model.push_to_hub("valid_org/test-model-flax-org" , use_auth_token=self._token ) UpperCAmelCase__ : List[str] = FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" ) UpperCAmelCase__ : Tuple = flatten_dict(unfreeze(model.params ) ) UpperCAmelCase__ : Dict = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase__ : str = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_UpperCAmelCase , 1e-3 , msg=f'{key} not identical' ) # Reset repo delete_repo(token=self._token , repo_id="valid_org/test-model-flax-org" ) # Push to hub via save_pretrained with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained( _UpperCAmelCase , repo_id="valid_org/test-model-flax-org" , push_to_hub=_UpperCAmelCase , use_auth_token=self._token ) UpperCAmelCase__ : List[str] = 
FlaxBertModel.from_pretrained("valid_org/test-model-flax-org" ) UpperCAmelCase__ : Optional[int] = flatten_dict(unfreeze(model.params ) ) UpperCAmelCase__ : Dict = flatten_dict(unfreeze(new_model.params ) ) for key in base_params.keys(): UpperCAmelCase__ : List[str] = (base_params[key] - new_params[key]).sum().item() self.assertLessEqual(_UpperCAmelCase , 1e-3 , msg=f'{key} not identical' ) def SCREAMING_SNAKE_CASE__ ( snake_case : Tuple , snake_case : Optional[Any] )-> List[str]: '''simple docstring''' UpperCAmelCase__ : List[str] = True UpperCAmelCase__ : int = flatten_dict(modela.params ) UpperCAmelCase__ : Union[str, Any] = flatten_dict(modela.params ) for key in flat_params_a.keys(): if np.sum(np.abs(flat_params_a[key] - flat_params_a[key] ) ) > 1E-4: UpperCAmelCase__ : str = False return models_are_equal @require_flax class lowerCAmelCase__ ( unittest.TestCase ): def __a ( self : List[str] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) UpperCAmelCase__ : Optional[Any] = FlaxBertModel(_UpperCAmelCase ) UpperCAmelCase__ : Optional[Any] = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) ) with self.assertRaises(_UpperCAmelCase ): UpperCAmelCase__ : Dict = FlaxBertModel.from_pretrained(_UpperCAmelCase ) UpperCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained(_UpperCAmelCase , subfolder=_UpperCAmelCase ) self.assertTrue(check_models_equal(_UpperCAmelCase , _UpperCAmelCase ) ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = BertConfig.from_pretrained("hf-internal-testing/tiny-bert-flax-only" ) UpperCAmelCase__ : Optional[int] = FlaxBertModel(_UpperCAmelCase ) UpperCAmelCase__ : List[str] = "bert" with tempfile.TemporaryDirectory() as tmp_dir: model.save_pretrained(os.path.join(_UpperCAmelCase , _UpperCAmelCase ) , max_shard_size="10KB" ) with self.assertRaises(_UpperCAmelCase ): UpperCAmelCase__ : Tuple = FlaxBertModel.from_pretrained(_UpperCAmelCase ) UpperCAmelCase__ : Optional[int] = FlaxBertModel.from_pretrained(_UpperCAmelCase , subfolder=_UpperCAmelCase ) self.assertTrue(check_models_equal(_UpperCAmelCase , _UpperCAmelCase ) ) def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = "bert" UpperCAmelCase__ : str = "hf-internal-testing/tiny-random-bert-subfolder" with self.assertRaises(_UpperCAmelCase ): UpperCAmelCase__ : List[str] = FlaxBertModel.from_pretrained(_UpperCAmelCase ) UpperCAmelCase__ : Dict = FlaxBertModel.from_pretrained(_UpperCAmelCase , subfolder=_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase ) def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = "bert" UpperCAmelCase__ : str = "hf-internal-testing/tiny-random-bert-sharded-subfolder" with self.assertRaises(_UpperCAmelCase ): UpperCAmelCase__ : List[str] = FlaxBertModel.from_pretrained(_UpperCAmelCase ) UpperCAmelCase__ : Dict = FlaxBertModel.from_pretrained(_UpperCAmelCase , subfolder=_UpperCAmelCase ) self.assertIsNotNone(_UpperCAmelCase )
367
"""simple docstring""" import os import tempfile from functools import partial from unittest import TestCase from unittest.mock import patch import datasets import datasets.config from .utils import require_beam class lowerCAmelCase__ ( datasets.BeamBasedBuilder ): def __a ( self : Dict ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=snake_case__ , ) def __a ( self : int , snake_case__ : str , snake_case__ : List[str] ): '''simple docstring''' return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )] def __a ( self : Any , snake_case__ : str , snake_case__ : str ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(snake_case__ ) class lowerCAmelCase__ ( datasets.BeamBasedBuilder ): def __a ( self : Any ): '''simple docstring''' return datasets.DatasetInfo( features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=snake_case__ , ) def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : int ): '''simple docstring''' return [ datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} ) ] def __a ( self : Dict , snake_case__ : List[Any] , snake_case__ : Any ): '''simple docstring''' import apache_beam as beam return pipeline | "Load Examples" >> beam.Create(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Dict: '''simple docstring''' return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )] def SCREAMING_SNAKE_CASE__ ( )-> List[Any]: '''simple docstring''' return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )] class lowerCAmelCase__ ( __magic_name__ ): @require_beam def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Any = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : List[Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) ) UpperCAmelCase__ : Tuple = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] ) self.assertDictEqual( dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset @require_beam def __a ( self : Dict ): '''simple docstring''' import apache_beam as beam UpperCAmelCase__ : Dict = beam.io.parquetio.WriteToParquet UpperCAmelCase__ : List[str] = len(get_test_dummy_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Union[str, Any] = DummyBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock: UpperCAmelCase__ : List[Any] = partial(snake_case__ , num_shards=2 ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join( snake_case__ , builder.name , 
"default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) ) self.assertTrue( os.path.exists( os.path.join( snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train-00000-of-00002.arrow' ) ) ) self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) ) UpperCAmelCase__ : Dict = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) # Order is not preserved when sharding, so we just check that all the elements are there self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset @require_beam def __a ( self : str ): '''simple docstring''' with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : Optional[Any] = DummyBeamDataset(cache_dir=snake_case__ ) self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare ) @require_beam def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = len(get_test_nested_examples() ) with tempfile.TemporaryDirectory() as tmp_cache_dir: UpperCAmelCase__ : List[Any] = NestedBeamDataset(cache_dir=snake_case__ , beam_runner="DirectRunner" ) builder.download_and_prepare() self.assertTrue( os.path.exists( os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , f'{builder.name}-train.arrow' ) ) ) self.assertDictEqual( builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) ) UpperCAmelCase__ : Tuple = builder.as_dataset() self.assertEqual(dset["train"].num_rows , snake_case__ ) self.assertEqual(dset["train"].info.splits["train"].num_examples , snake_case__ ) self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] ) self.assertDictEqual( dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] ) self.assertTrue( os.path.exists(os.path.join(snake_case__ , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) ) del dset
298
0
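The hub round-trip tests above compare two checkpoints by flattening both parameter trees and bounding the per-leaf absolute difference. A standalone sketch of that pattern, with toy nested dicts standing in for real Flax params:

# Sketch of the parameter-equality check used above, on toy dicts.
import numpy as np
from flax.traverse_util import flatten_dict

params_a = {"layer": {"kernel": np.ones((2, 2))}}
params_b = {"layer": {"kernel": np.ones((2, 2))}}

flat_a, flat_b = flatten_dict(params_a), flatten_dict(params_b)
for key in flat_a:
    diff = np.abs(flat_a[key] - flat_b[key]).sum().item()
    assert diff <= 1e-3, f"{key} not identical"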
"""simple docstring""" # Lint as: python3 import dataclasses import re from dataclasses import dataclass from functools import total_ordering from typing import Optional, Union _lowerCAmelCase : Union[str, Any] = re.compile(r"""^(?P<major>\d+)""" r"""\.(?P<minor>\d+)""" r"""\.(?P<patch>\d+)$""") @total_ordering @dataclass class lowerCAmelCase__ : SCREAMING_SNAKE_CASE_ =42 SCREAMING_SNAKE_CASE_ =None SCREAMING_SNAKE_CASE_ =None SCREAMING_SNAKE_CASE_ =None SCREAMING_SNAKE_CASE_ =None def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = _str_to_version_tuple(self.version_str ) def __repr__( self : Union[str, Any] ): '''simple docstring''' return f'{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}' @property def __a ( self : List[Any] ): '''simple docstring''' return self.major, self.minor, self.patch def __a ( self : Union[str, Any] , snake_case__ : Any ): '''simple docstring''' if isinstance(__lowerCamelCase , __lowerCamelCase ): return Version(__lowerCamelCase ) elif isinstance(__lowerCamelCase , __lowerCamelCase ): return other raise TypeError(f'{other} (type {type(__lowerCamelCase )}) cannot be compared to version.' ) def __eq__( self : List[Any] , snake_case__ : Optional[Any] ): '''simple docstring''' try: UpperCAmelCase__ : List[Any] = self._validate_operand(__lowerCamelCase ) except (TypeError, ValueError): return False else: return self.tuple == other.tuple def __lt__( self : Tuple , snake_case__ : Any ): '''simple docstring''' UpperCAmelCase__ : List[str] = self._validate_operand(__lowerCamelCase ) return self.tuple < other.tuple def __hash__( self : Dict ): '''simple docstring''' return hash(_version_tuple_to_str(self.tuple ) ) @classmethod def __a ( cls : Tuple , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = {f.name for f in dataclasses.fields(cls )} return cls(**{k: v for k, v in dic.items() if k in field_names} ) def __a ( self : str ): '''simple docstring''' return self.version_str def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[int] )-> Dict: '''simple docstring''' UpperCAmelCase__ : Any = _VERSION_REG.match(snake_case ) if not res: raise ValueError(f'Invalid version \'{version_str}\'. Format should be x.y.z with {{x,y,z}} being digits.' ) return tuple(int(snake_case ) for v in [res.group("major" ), res.group("minor" ), res.group("patch" )] ) def SCREAMING_SNAKE_CASE__ ( snake_case : Union[str, Any] )-> int: '''simple docstring''' return ".".join(str(snake_case ) for v in version_tuple )
368
"""simple docstring""" import json import os import unittest from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer from transformers.testing_utils import slow from ...test_tokenization_common import TokenizerTesterMixin class lowerCAmelCase__ ( __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =XLMTokenizer SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple docstring''' super().setUp() # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt UpperCAmelCase__ : Optional[int] = [ "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>", "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>", ] UpperCAmelCase__ : Any = dict(zip(snake_case__ , range(len(snake_case__ ) ) ) ) UpperCAmelCase__ : Tuple = ["l o 123", "lo w 1456", "e r</w> 1789", ""] UpperCAmelCase__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] ) UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] ) with open(self.vocab_file , "w" ) as fp: fp.write(json.dumps(snake_case__ ) ) with open(self.merges_file , "w" ) as fp: fp.write("\n".join(snake_case__ ) ) def __a ( self : Union[str, Any] , snake_case__ : List[Any] ): '''simple docstring''' UpperCAmelCase__ : str = "lower newer" UpperCAmelCase__ : Optional[Any] = "lower newer" return input_text, output_text def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = XLMTokenizer(self.vocab_file , self.merges_file ) UpperCAmelCase__ : List[Any] = "lower" UpperCAmelCase__ : Any = ["low", "er</w>"] UpperCAmelCase__ : Any = tokenizer.tokenize(snake_case__ ) self.assertListEqual(snake_case__ , snake_case__ ) UpperCAmelCase__ : Optional[Any] = tokens + ["<unk>"] UpperCAmelCase__ : List[Any] = [1_4, 1_5, 2_0] self.assertListEqual(tokenizer.convert_tokens_to_ids(snake_case__ ) , snake_case__ ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : Any = XLMTokenizer.from_pretrained("xlm-mlm-en-2048" ) UpperCAmelCase__ : str = tokenizer.encode("sequence builders" , add_special_tokens=snake_case__ ) UpperCAmelCase__ : Dict = tokenizer.encode("multi-sequence build" , add_special_tokens=snake_case__ ) UpperCAmelCase__ : Any = tokenizer.build_inputs_with_special_tokens(snake_case__ ) UpperCAmelCase__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(snake_case__ , snake_case__ ) assert encoded_sentence == [0] + text + [1] assert encoded_pair == [0] + text + [1] + text_a + [1]
298
0
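Under its obfuscated names, the dataclass above is a semantic-version wrapper: the regex splits "x.y.z" into integers and @total_ordering derives the remaining comparisons from __eq__ and __lt__ over the tuples. A de-obfuscated sketch of the same idea (names chosen here for readability, not taken verbatim from the row above):

# Sketch of the version-comparison logic above, with readable names.
import re
from dataclasses import dataclass
from functools import total_ordering

_VERSION_REG = re.compile(r"^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)$")

@total_ordering
@dataclass
class Version:
    version_str: str

    def __post_init__(self):
        m = _VERSION_REG.match(self.version_str)
        if not m:
            raise ValueError(f"Invalid version '{self.version_str}'.")
        self.tuple = tuple(int(m.group(g)) for g in ("major", "minor", "patch"))

    def __eq__(self, other):
        return self.tuple == other.tuple

    def __lt__(self, other):
        return self.tuple < other.tuple

assert Version("1.10.0") > Version("1.9.2")  # numeric, not lexicographic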
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_lowerCAmelCase : List[str] = {"""configuration_timm_backbone""": ["""TimmBackboneConfig"""]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _lowerCAmelCase : Any = ["""TimmBackbone"""]


if TYPE_CHECKING:
    from .configuration_timm_backbone import TimmBackboneConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_timm_backbone import TimmBackbone

else:
    import sys

    _lowerCAmelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
369
"""simple docstring""" import sys from typing import Tuple import numpy as np import torch from PIL import Image from torch import nn from transformers.image_utils import PILImageResampling from utils import img_tensorize class lowerCAmelCase__ : def __init__( self : Optional[int] , snake_case__ : List[Any] , snake_case__ : str=sys.maxsize ): '''simple docstring''' UpperCAmelCase__ : Any = "bilinear" UpperCAmelCase__ : Any = max_size UpperCAmelCase__ : Any = short_edge_length def __call__( self : Dict , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = [] for img in imgs: UpperCAmelCase__ , UpperCAmelCase__ : int = img.shape[:2] # later: provide list and randomly choose index for resize UpperCAmelCase__ : Dict = np.random.randint(self.short_edge_length[0] , self.short_edge_length[1] + 1 ) if size == 0: return img UpperCAmelCase__ : Dict = size * 1.0 / min(snake_case__ , snake_case__ ) if h < w: UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = size, scale * w else: UpperCAmelCase__ , UpperCAmelCase__ : int = scale * h, size if max(snake_case__ , snake_case__ ) > self.max_size: UpperCAmelCase__ : Union[str, Any] = self.max_size * 1.0 / max(snake_case__ , snake_case__ ) UpperCAmelCase__ : List[str] = newh * scale UpperCAmelCase__ : int = neww * scale UpperCAmelCase__ : List[Any] = int(neww + 0.5 ) UpperCAmelCase__ : Optional[Any] = int(newh + 0.5 ) if img.dtype == np.uinta: UpperCAmelCase__ : Any = Image.fromarray(snake_case__ ) UpperCAmelCase__ : Union[str, Any] = pil_image.resize((neww, newh) , PILImageResampling.BILINEAR ) UpperCAmelCase__ : Optional[int] = np.asarray(snake_case__ ) else: UpperCAmelCase__ : Any = img.permute(2 , 0 , 1 ).unsqueeze(0 ) # 3, 0, 1) # hw(c) -> nchw UpperCAmelCase__ : Tuple = nn.functional.interpolate( snake_case__ , (newh, neww) , mode=self.interp_method , align_corners=snake_case__ ).squeeze(0 ) img_augs.append(snake_case__ ) return img_augs class lowerCAmelCase__ : def __init__( self : Optional[int] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = ResizeShortestEdge([cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST] , cfg.INPUT.MAX_SIZE_TEST ) UpperCAmelCase__ : Any = cfg.INPUT.FORMAT UpperCAmelCase__ : Optional[Any] = cfg.SIZE_DIVISIBILITY UpperCAmelCase__ : str = cfg.PAD_VALUE UpperCAmelCase__ : List[Any] = cfg.INPUT.MAX_SIZE_TEST UpperCAmelCase__ : Dict = cfg.MODEL.DEVICE UpperCAmelCase__ : Optional[int] = torch.tensor(cfg.MODEL.PIXEL_STD ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase__ : str = torch.tensor(cfg.MODEL.PIXEL_MEAN ).to(self.device ).view(len(cfg.MODEL.PIXEL_STD ) , 1 , 1 ) UpperCAmelCase__ : List[str] = lambda snake_case__ : (x - self.pixel_mean) / self.pixel_std def __a ( self : Optional[int] , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = tuple(max(snake_case__ ) for s in zip(*[img.shape for img in images] ) ) UpperCAmelCase__ : Tuple = [im.shape[-2:] for im in images] UpperCAmelCase__ : int = [ nn.functional.pad( snake_case__ , [0, max_size[-1] - size[1], 0, max_size[-2] - size[0]] , value=self.pad_value , ) for size, im in zip(snake_case__ , snake_case__ ) ] return torch.stack(snake_case__ ), torch.tensor(snake_case__ ) def __call__( self : str , snake_case__ : int , snake_case__ : int=False ): '''simple docstring''' with torch.no_grad(): if not isinstance(snake_case__ , snake_case__ ): UpperCAmelCase__ : Dict = [images] if single_image: assert len(snake_case__ ) == 1 for i in range(len(snake_case__ ) ): if 
isinstance(images[i] , torch.Tensor ): images.insert(snake_case__ , images.pop(snake_case__ ).to(self.device ).float() ) elif not isinstance(images[i] , torch.Tensor ): images.insert( snake_case__ , torch.as_tensor(img_tensorize(images.pop(snake_case__ ) , input_format=self.input_format ) ) .to(self.device ) .float() , ) # resize smallest edge UpperCAmelCase__ : Optional[Any] = torch.tensor([im.shape[:2] for im in images] ) UpperCAmelCase__ : Tuple = self.aug(snake_case__ ) # transpose images and convert to torch tensors # images = [torch.as_tensor(i.astype("float32")).permute(2, 0, 1).to(self.device) for i in images] # now normalize before pad to avoid useless arithmetic UpperCAmelCase__ : Optional[int] = [self.normalizer(snake_case__ ) for x in images] # now pad them to do the following operations UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = self.pad(snake_case__ ) # Normalize if self.size_divisibility > 0: raise NotImplementedError() # pad UpperCAmelCase__ : Tuple = torch.true_divide(snake_case__ , snake_case__ ) if single_image: return images[0], sizes[0], scales_yx[0] else: return images, sizes, scales_yx def SCREAMING_SNAKE_CASE__ ( snake_case : Optional[Any] , snake_case : str )-> List[Any]: '''simple docstring''' boxes[:, 0::2] *= scale_yx[:, 1] boxes[:, 1::2] *= scale_yx[:, 0] return boxes def SCREAMING_SNAKE_CASE__ ( snake_case : List[Any] , snake_case : Tuple[int, int] )-> int: '''simple docstring''' assert torch.isfinite(snake_case ).all(), "Box tensor contains infinite or NaN!" UpperCAmelCase__ , UpperCAmelCase__ : Dict = box_size tensor[:, 0].clamp_(min=0 , max=snake_case ) tensor[:, 1].clamp_(min=0 , max=snake_case ) tensor[:, 2].clamp_(min=0 , max=snake_case ) tensor[:, 3].clamp_(min=0 , max=snake_case )
298
0
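The resize class above scales the shorter image edge to a target size, then rescales both edges again if the longer one would exceed max_size, rounding with +0.5. A minimal sketch of that arithmetic (the function name is illustrative):

# Sketch of the shortest-edge resize arithmetic used above.
def resize_shortest_edge(h: int, w: int, size: int, max_size: int) -> tuple:
    scale = size / min(h, w)
    newh, neww = (size, w * scale) if h < w else (h * scale, size)
    if max(newh, neww) > max_size:
        rescale = max_size / max(newh, neww)
        newh, neww = newh * rescale, neww * rescale
    return int(newh + 0.5), int(neww + 0.5)

assert resize_shortest_edge(480, 640, 600, 1000) == (600, 800)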
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case : list[int] )-> list[list[int]]: '''simple docstring''' UpperCAmelCase__ : List[str] = [] if len(_lowerCamelCase ) == 1: return [nums.copy()] for _ in range(len(_lowerCamelCase ) ): UpperCAmelCase__ : Tuple = nums.pop(0 ) UpperCAmelCase__ : Optional[int] = permute(_lowerCamelCase ) for perm in permutations: perm.append(_lowerCamelCase ) result.extend(_lowerCamelCase ) nums.append(_lowerCamelCase ) return result def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> str: '''simple docstring''' def backtrack(snake_case : Optional[int] ): if start == len(_lowerCamelCase ) - 1: output.append(nums[:] ) else: for i in range(_lowerCamelCase , len(_lowerCamelCase ) ): UpperCAmelCase__ : Union[str, Any] = nums[i], nums[start] backtrack(start + 1 ) UpperCAmelCase__ : List[str] = nums[i], nums[start] # backtrack UpperCAmelCase__ : Optional[int] = [] backtrack(0 ) return output if __name__ == "__main__": import doctest # use res to print the data in permute2 function _lowerCAmelCase : str = permutea([1, 2, 3]) print(res) doctest.testmod()
370
"""simple docstring""" import qiskit def SCREAMING_SNAKE_CASE__ ( snake_case : int , snake_case : int )-> qiskit.result.counts.Counts: '''simple docstring''' UpperCAmelCase__ : str = qiskit.Aer.get_backend("aer_simulator" ) UpperCAmelCase__ : Optional[int] = qiskit.QuantumCircuit(4 , 2 ) # encode inputs in qubits 0 and 1 if bita == 1: qc_ha.x(0 ) if bita == 1: qc_ha.x(1 ) qc_ha.barrier() # use cnots to write XOR of the inputs on qubit2 qc_ha.cx(0 , 2 ) qc_ha.cx(1 , 2 ) # use ccx / toffoli gate to write AND of the inputs on qubit3 qc_ha.ccx(0 , 1 , 3 ) qc_ha.barrier() # extract outputs qc_ha.measure(2 , 0 ) # extract XOR value qc_ha.measure(3 , 1 ) # extract AND value # Execute the circuit on the qasm simulator UpperCAmelCase__ : Optional[int] = qiskit.execute(snake_case , snake_case , shots=1000 ) # Return the histogram data of the results of the experiment return job.result().get_counts(snake_case ) if __name__ == "__main__": _lowerCAmelCase : Optional[Any] = half_adder(1, 1) print(F"""Half Adder Output Qubit Counts: {counts}""")
298
0
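The circuit above computes sum = a XOR b on qubit 2 and carry = a AND b on qubit 3. A classical reference for the ideal, noiseless outcome (Qiskit prints classical bits most-significant first, so the count key reads carry then sum):

# Classical reference for the half adder above.
def half_adder_classical(bit_a: int, bit_b: int) -> str:
    total = bit_a ^ bit_b   # XOR -> sum bit (classical bit 0)
    carry = bit_a & bit_b   # AND -> carry bit (classical bit 1)
    return f"{carry}{total}"

assert half_adder_classical(1, 1) == "10"  # the ideal simulator's single peak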
"""simple docstring""" import argparse import hashlib # hashlib is only used inside the Test class import struct class lowerCAmelCase__ : def __init__( self : str , snake_case__ : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = data UpperCAmelCase__ : Tuple = [0x67_452_301, 0xef_cda_b89, 0x98_bad_cfe, 0x10_325_476, 0xc3_d2e_1f0] @staticmethod def __a ( snake_case__ : Any , snake_case__ : str ): '''simple docstring''' return ((n << b) | (n >> (3_2 - b))) & 0xff_fff_fff def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : str = B'''\x80''' + B'''\x00''' * (6_3 - (len(self.data ) + 8) % 6_4) UpperCAmelCase__ : Any = self.data + padding + struct.pack(">Q" , 8 * len(self.data ) ) return padded_data def __a ( self : Union[str, Any] ): '''simple docstring''' return [ self.padded_data[i : i + 6_4] for i in range(0 , len(self.padded_data ) , 6_4 ) ] def __a ( self : Any , snake_case__ : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Tuple = list(struct.unpack(">16L" , _SCREAMING_SNAKE_CASE ) ) + [0] * 6_4 for i in range(1_6 , 8_0 ): UpperCAmelCase__ : str = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 1_4] ^ w[i - 1_6]) , 1 ) return w def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : str = self.padding() UpperCAmelCase__ : Optional[int] = self.split_blocks() for block in self.blocks: UpperCAmelCase__ : Optional[int] = self.expand_block(_SCREAMING_SNAKE_CASE ) UpperCAmelCase__ : List[str] = self.h for i in range(0 , 8_0 ): if 0 <= i < 2_0: UpperCAmelCase__ : List[Any] = (b & c) | ((~b) & d) UpperCAmelCase__ : Any = 0x5a_827_999 elif 2_0 <= i < 4_0: UpperCAmelCase__ : Union[str, Any] = b ^ c ^ d UpperCAmelCase__ : int = 0x6e_d9e_ba1 elif 4_0 <= i < 6_0: UpperCAmelCase__ : int = (b & c) | (b & d) | (c & d) UpperCAmelCase__ : Tuple = 0x8f_1bb_cdc elif 6_0 <= i < 8_0: UpperCAmelCase__ : Any = b ^ c ^ d UpperCAmelCase__ : Dict = 0xca_62c_1d6 UpperCAmelCase__ : Tuple = ( self.rotate(_SCREAMING_SNAKE_CASE , 5 ) + f + e + k + expanded_block[i] & 0xff_fff_fff, a, self.rotate(_SCREAMING_SNAKE_CASE , 3_0 ), c, d, ) UpperCAmelCase__ : List[str] = ( self.h[0] + a & 0xff_fff_fff, self.h[1] + b & 0xff_fff_fff, self.h[2] + c & 0xff_fff_fff, self.h[3] + d & 0xff_fff_fff, self.h[4] + e & 0xff_fff_fff, ) return ("{:08x}" * 5).format(*self.h ) def SCREAMING_SNAKE_CASE__ ( )-> str: '''simple docstring''' UpperCAmelCase__ : Dict = B'''Test String''' assert SHAaHash(snake_case ).final_hash() == hashlib.shaa(snake_case ).hexdigest() # noqa: S324 def SCREAMING_SNAKE_CASE__ ( )-> List[Any]: '''simple docstring''' UpperCAmelCase__ : int = argparse.ArgumentParser(description="Process some strings or files" ) parser.add_argument( "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , ) parser.add_argument("--file" , dest="input_file" , help="Hash contents of a file" ) UpperCAmelCase__ : Tuple = parser.parse_args() UpperCAmelCase__ : int = args.input_string # In any case hash input should be a bytestring if args.input_file: with open(args.input_file , "rb" ) as f: UpperCAmelCase__ : List[Any] = f.read() else: UpperCAmelCase__ : str = bytes(snake_case , "utf-8" ) print(SHAaHash(snake_case ).final_hash() ) if __name__ == "__main__": main() import doctest doctest.testmod()
371
"""simple docstring""" from typing import List from ...configuration_utils import PretrainedConfig from ...utils import logging _lowerCAmelCase : Dict = logging.get_logger(__name__) _lowerCAmelCase : Union[str, Any] = { """snap-research/efficientformer-l1-300""": ( """https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json""" ), } class lowerCAmelCase__ ( __magic_name__ ): SCREAMING_SNAKE_CASE_ ='''efficientformer''' def __init__( self : List[Any] , snake_case__ : List[int] = [3, 2, 6, 4] , snake_case__ : List[int] = [4_8, 9_6, 2_2_4, 4_4_8] , snake_case__ : List[bool] = [True, True, True, True] , snake_case__ : int = 4_4_8 , snake_case__ : int = 3_2 , snake_case__ : int = 4 , snake_case__ : int = 7 , snake_case__ : int = 5 , snake_case__ : int = 8 , snake_case__ : int = 4 , snake_case__ : float = 0.0 , snake_case__ : int = 1_6 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : float = 0.0 , snake_case__ : int = 1 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : float = 1e-5 , snake_case__ : str = "gelu" , snake_case__ : float = 0.02 , snake_case__ : float = 1e-12 , snake_case__ : int = 2_2_4 , snake_case__ : float = 1e-05 , **snake_case__ : str , ): '''simple docstring''' super().__init__(**snake_case__ ) UpperCAmelCase__ : int = hidden_act UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : List[str] = hidden_sizes UpperCAmelCase__ : Union[str, Any] = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : List[Any] = initializer_range UpperCAmelCase__ : List[Any] = layer_norm_eps UpperCAmelCase__ : Optional[int] = patch_size UpperCAmelCase__ : Tuple = num_channels UpperCAmelCase__ : Optional[int] = depths UpperCAmelCase__ : Union[str, Any] = mlp_expansion_ratio UpperCAmelCase__ : Dict = downsamples UpperCAmelCase__ : Any = dim UpperCAmelCase__ : str = key_dim UpperCAmelCase__ : List[Any] = attention_ratio UpperCAmelCase__ : Optional[Any] = resolution UpperCAmelCase__ : Optional[Any] = pool_size UpperCAmelCase__ : Any = downsample_patch_size UpperCAmelCase__ : int = downsample_stride UpperCAmelCase__ : Dict = downsample_pad UpperCAmelCase__ : List[Any] = drop_path_rate UpperCAmelCase__ : Optional[Any] = num_metaad_blocks UpperCAmelCase__ : List[str] = distillation UpperCAmelCase__ : Dict = use_layer_scale UpperCAmelCase__ : List[Any] = layer_scale_init_value UpperCAmelCase__ : Optional[Any] = image_size UpperCAmelCase__ : Optional[int] = batch_norm_eps
298
0
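Setting the dump's renamed identifiers aside, the sanity check intended by the test function above is a comparison of the from-scratch digest against hashlib, as its assert shows. A standalone version that avoids hardcoding any digest value:

# Standalone version of the hashlib cross-check performed above.
import hashlib

message = b"Test String"
digest = hashlib.sha1(message).hexdigest()  # noqa: S324 (non-crypto use)
# A correct from-scratch SHA-1 must reproduce this 40-char hex digest exactly.
assert len(digest) == 40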
"""simple docstring""" def SCREAMING_SNAKE_CASE__ ( snake_case )-> Optional[Any]: '''simple docstring''' if length <= 0 or not isinstance(lowerCamelCase__ , lowerCamelCase__ ): raise ValueError("Length must be a positive integer." ) return [n * (2 * n - 1) for n in range(lowerCamelCase__ )] if __name__ == "__main__": print(hexagonal_numbers(length=5)) print(hexagonal_numbers(length=10))
350
"""simple docstring""" import argparse import re from typing import Dict import torch from datasets import Audio, Dataset, load_dataset, load_metric from transformers import AutoFeatureExtractor, pipeline def SCREAMING_SNAKE_CASE__ ( snake_case : Dataset , snake_case : Dict[str, str] )-> Any: '''simple docstring''' UpperCAmelCase__ : str = args.log_outputs UpperCAmelCase__ : str = "_".join(args.dataset.split("/" ) + [args.config, args.split] ) # load metric UpperCAmelCase__ : List[str] = load_metric("wer" ) UpperCAmelCase__ : Tuple = load_metric("cer" ) # compute metrics UpperCAmelCase__ : List[str] = wer.compute(references=result["target"] , predictions=result["prediction"] ) UpperCAmelCase__ : Tuple = cer.compute(references=result["target"] , predictions=result["prediction"] ) # print & log results UpperCAmelCase__ : Union[str, Any] = f'WER: {wer_result}\nCER: {cer_result}' print(snake_case ) with open(f'{dataset_id}_eval_results.txt' , "w" ) as f: f.write(snake_case ) # log all results in text file. Possibly interesting for analysis if log_outputs is not None: UpperCAmelCase__ : str = f'log_{dataset_id}_predictions.txt' UpperCAmelCase__ : List[str] = f'log_{dataset_id}_targets.txt' with open(snake_case , "w" ) as p, open(snake_case , "w" ) as t: # mapping function to write output def write_to_file(snake_case : List[Any] , snake_case : List[str] ): p.write(f'{i}' + "\n" ) p.write(batch["prediction"] + "\n" ) t.write(f'{i}' + "\n" ) t.write(batch["target"] + "\n" ) result.map(snake_case , with_indices=snake_case ) def SCREAMING_SNAKE_CASE__ ( snake_case : str )-> str: '''simple docstring''' UpperCAmelCase__ : str = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training UpperCAmelCase__ : str = re.sub(snake_case , "" , text.lower() ) # In addition, we can normalize the target text, e.g. removing new lines characters etc... # note that order is important here! 
UpperCAmelCase__ : Tuple = ["\n\n", "\n", " ", " "] for t in token_sequences_to_ignore: UpperCAmelCase__ : List[Any] = " ".join(text.split(snake_case ) ) return text def SCREAMING_SNAKE_CASE__ ( snake_case : List[str] )-> str: '''simple docstring''' UpperCAmelCase__ : Optional[int] = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case ) # for testing: only process the first two examples as a test # dataset = dataset.select(range(10)) # load processor UpperCAmelCase__ : List[Any] = AutoFeatureExtractor.from_pretrained(args.model_id ) UpperCAmelCase__ : str = feature_extractor.sampling_rate # resample audio UpperCAmelCase__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case ) ) # load eval pipeline if args.device is None: UpperCAmelCase__ : List[str] = 0 if torch.cuda.is_available() else -1 UpperCAmelCase__ : Optional[int] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device ) # map function to decode audio def map_to_pred(snake_case : Any ): UpperCAmelCase__ : List[str] = asr( batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s ) UpperCAmelCase__ : List[Any] = prediction["text"] UpperCAmelCase__ : Optional[int] = normalize_text(batch["sentence"] ) return batch # run inference on all examples UpperCAmelCase__ : Dict = dataset.map(snake_case , remove_columns=dataset.column_names ) # compute and log_results # do not change function below log_results(snake_case , snake_case ) if __name__ == "__main__": _lowerCAmelCase : Any = argparse.ArgumentParser() parser.add_argument( """--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers""" ) parser.add_argument( """--dataset""", type=str, required=True, help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""", ) parser.add_argument( """--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice""" ) parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""") parser.add_argument( """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds.""" ) parser.add_argument( """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second.""" ) parser.add_argument( """--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis.""" ) parser.add_argument( """--device""", type=int, default=None, help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""", ) _lowerCAmelCase : Tuple = parser.parse_args() main(args)
298
0
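The list comprehension a few rows above implements h(n) = n(2n - 1), the closed form for hexagonal numbers. A quick check against the first few values:

# Quick check of the closed form h(n) = n * (2n - 1) used above.
def hexagonal_numbers(length: int) -> list:
    if length <= 0 or not isinstance(length, int):
        raise ValueError("Length must be a positive integer.")
    return [n * (2 * n - 1) for n in range(length)]

assert hexagonal_numbers(length=5) == [0, 1, 6, 15, 28]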
"""simple docstring""" import os from shutil import copyfile from typing import List, Optional, Tuple from ...tokenization_utils import AddedToken from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import is_sentencepiece_available, logging if is_sentencepiece_available(): from .tokenization_barthez import BarthezTokenizer else: _lowerCAmelCase : Optional[int] = None _lowerCAmelCase : List[str] = logging.get_logger(__name__) _lowerCAmelCase : Union[str, Any] = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""} _lowerCAmelCase : List[str] = { """vocab_file""": { """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model""", """moussaKam/barthez-orangesum-title""": ( """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model""" ), }, """tokenizer_file""": { """moussaKam/mbarthez""": """https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json""", """moussaKam/barthez""": """https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json""", """moussaKam/barthez-orangesum-title""": ( """https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json""" ), }, } _lowerCAmelCase : List[str] = { """moussaKam/mbarthez""": 1_024, """moussaKam/barthez""": 1_024, """moussaKam/barthez-orangesum-title""": 1_024, } _lowerCAmelCase : List[str] = """▁""" class lowerCAmelCase__ ( __SCREAMING_SNAKE_CASE ): SCREAMING_SNAKE_CASE_ =VOCAB_FILES_NAMES SCREAMING_SNAKE_CASE_ =PRETRAINED_VOCAB_FILES_MAP SCREAMING_SNAKE_CASE_ =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES SCREAMING_SNAKE_CASE_ =["input_ids", "attention_mask"] SCREAMING_SNAKE_CASE_ =BarthezTokenizer def __init__( self : List[str] , snake_case__ : Optional[Any]=None , snake_case__ : int=None , snake_case__ : Optional[int]="<s>" , snake_case__ : int="</s>" , snake_case__ : Union[str, Any]="</s>" , snake_case__ : Optional[int]="<s>" , snake_case__ : int="<unk>" , snake_case__ : str="<pad>" , snake_case__ : Optional[Any]="<mask>" , **snake_case__ : Tuple , ): '''simple docstring''' # Mask token behave like a normal word, i.e. 
include the space before it UpperCAmelCase__ : Dict = AddedToken(_snake_case , lstrip=_snake_case , rstrip=_snake_case ) if isinstance(_snake_case , _snake_case ) else mask_token super().__init__( _snake_case , tokenizer_file=_snake_case , bos_token=_snake_case , eos_token=_snake_case , unk_token=_snake_case , sep_token=_snake_case , cls_token=_snake_case , pad_token=_snake_case , mask_token=_snake_case , **_snake_case , ) UpperCAmelCase__ : Tuple = vocab_file UpperCAmelCase__ : Optional[Any] = False if not self.vocab_file else True def __a ( self : List[Any] , snake_case__ : str , snake_case__ : List[str] = None ): '''simple docstring''' if token_ids_a is None: return [self.cls_token_id] + token_ids_a + [self.sep_token_id] UpperCAmelCase__ : Dict = [self.cls_token_id] UpperCAmelCase__ : str = [self.sep_token_id] return cls + token_ids_a + sep + sep + token_ids_a + sep def __a ( self : Any , snake_case__ : Optional[Any] , snake_case__ : int = None ): '''simple docstring''' UpperCAmelCase__ : Union[str, Any] = [self.sep_token_id] UpperCAmelCase__ : List[Any] = [self.cls_token_id] if token_ids_a is None: return len(cls + token_ids_a + sep ) * [0] return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0] def __a ( self : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] = None ): '''simple docstring''' if not self.can_save_slow_tokenizer: raise ValueError( "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow " "tokenizer." ) if not os.path.isdir(_snake_case ): logger.error(f'Vocabulary path ({save_directory}) should be a directory' ) return UpperCAmelCase__ : Any = os.path.join( _snake_case , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file ) != os.path.abspath(_snake_case ): copyfile(self.vocab_file , _snake_case ) return (out_vocab_file,)
351
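The pair-building logic in the tokenizer above follows the RoBERTa-style layout <s> A </s></s> B </s>. A small sketch over raw id lists (the token ids below are placeholders, not the real vocabulary):

# Sketch of build_inputs_with_special_tokens above; ids are placeholders.
from typing import List, Optional

cls_id, sep_id = 0, 2

def build_pair(ids_a: List[int], ids_b: Optional[List[int]] = None) -> List[int]:
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    return [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]

assert build_pair([7, 8], [9]) == [0, 7, 8, 2, 2, 9, 2]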
"""simple docstring""" import inspect import unittest from datasets import load_dataset from packaging import version from transformers import BeitConfig from transformers.models.auto import get_values from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( MODEL_MAPPING, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation, BeitModel, ) from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): import PIL from PIL import Image from transformers import BeitImageProcessor class lowerCAmelCase__ : def __init__( self : Any , snake_case__ : Union[str, Any] , snake_case__ : str=1_0_0 , snake_case__ : str=1_3 , snake_case__ : Optional[int]=3_0 , snake_case__ : List[Any]=2 , snake_case__ : Any=3 , snake_case__ : Union[str, Any]=True , snake_case__ : List[Any]=True , snake_case__ : Any=3_2 , snake_case__ : List[str]=4 , snake_case__ : Any=4 , snake_case__ : Dict=3_7 , snake_case__ : str="gelu" , snake_case__ : Union[str, Any]=0.1 , snake_case__ : int=0.1 , snake_case__ : List[Any]=1_0 , snake_case__ : Any=0.02 , snake_case__ : List[str]=3 , snake_case__ : Tuple=None , snake_case__ : Tuple=[0, 1, 2, 3] , ): '''simple docstring''' UpperCAmelCase__ : int = parent UpperCAmelCase__ : List[str] = 1_0_0 UpperCAmelCase__ : List[Any] = batch_size UpperCAmelCase__ : int = image_size UpperCAmelCase__ : List[Any] = patch_size UpperCAmelCase__ : List[Any] = num_channels UpperCAmelCase__ : Any = is_training UpperCAmelCase__ : str = use_labels UpperCAmelCase__ : Any = hidden_size UpperCAmelCase__ : Dict = num_hidden_layers UpperCAmelCase__ : int = num_attention_heads UpperCAmelCase__ : Tuple = intermediate_size UpperCAmelCase__ : Any = hidden_act UpperCAmelCase__ : Optional[int] = hidden_dropout_prob UpperCAmelCase__ : str = attention_probs_dropout_prob UpperCAmelCase__ : Optional[int] = type_sequence_label_size UpperCAmelCase__ : Any = initializer_range UpperCAmelCase__ : Any = scope UpperCAmelCase__ : Optional[Any] = out_indices UpperCAmelCase__ : int = num_labels # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) UpperCAmelCase__ : List[Any] = (image_size // patch_size) ** 2 UpperCAmelCase__ : Optional[int] = num_patches + 1 def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] ) UpperCAmelCase__ : str = None UpperCAmelCase__ : Optional[int] = None if self.use_labels: UpperCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size ) UpperCAmelCase__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels ) UpperCAmelCase__ : Tuple = self.get_config() return config, pixel_values, labels, pixel_labels def __a ( self : int ): '''simple docstring''' return BeitConfig( vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads 
, intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , out_indices=self.out_indices , ) def __a ( self : int , snake_case__ : str , snake_case__ : str , snake_case__ : Dict , snake_case__ : List[str] ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitModel(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Dict = model(snake_case__ ) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) ) def __a ( self : Any , snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Any ): '''simple docstring''' UpperCAmelCase__ : int = BeitForMaskedImageModeling(config=snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : List[Any] = model(snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) ) def __a ( self : Optional[Any] , snake_case__ : Tuple , snake_case__ : Tuple , snake_case__ : str , snake_case__ : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : List[Any] = self.type_sequence_label_size UpperCAmelCase__ : Union[str, Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Union[str, Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) # test greyscale images UpperCAmelCase__ : Any = 1 UpperCAmelCase__ : List[Any] = BeitForImageClassification(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] ) UpperCAmelCase__ : Optional[Any] = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) ) def __a ( self : Union[str, Any] , snake_case__ : int , snake_case__ : str , snake_case__ : Any , snake_case__ : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.num_labels UpperCAmelCase__ : int = BeitForSemanticSegmentation(snake_case__ ) model.to(snake_case__ ) model.eval() UpperCAmelCase__ : int = model(snake_case__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) UpperCAmelCase__ : Dict = model(snake_case__ , labels=snake_case__ ) self.parent.assertEqual( result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) ) def __a ( self : int ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.prepare_config_and_inputs() UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = config_and_inputs UpperCAmelCase__ : Any = {"pixel_values": pixel_values} return config, inputs_dict @require_torch class lowerCAmelCase__ ( __magic_name__ , __magic_name__ , unittest.TestCase ): SCREAMING_SNAKE_CASE_ =( (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation) if is_torch_available() else () ) SCREAMING_SNAKE_CASE_ =( { '''feature-extraction''': BeitModel, '''image-classification''': BeitForImageClassification, '''image-segmentation''': BeitForSemanticSegmentation, } if is_torch_available() else {} ) SCREAMING_SNAKE_CASE_ =False SCREAMING_SNAKE_CASE_ =False 
SCREAMING_SNAKE_CASE_ =False def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitModelTester(self ) UpperCAmelCase__ : List[str] = ConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 ) def __a ( self : List[str] ): '''simple docstring''' self.config_tester.run_common_tests() @unittest.skip(reason="BEiT does not use inputs_embeds" ) def __a ( self : List[Any] ): '''simple docstring''' pass @require_torch_multi_gpu @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`" ) def __a ( self : List[str] ): '''simple docstring''' pass def __a ( self : Tuple ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : Dict = model_class(snake_case__ ) self.assertIsInstance(model.get_input_embeddings() , (nn.Module) ) UpperCAmelCase__ : Tuple = model.get_output_embeddings() self.assertTrue(x is None or isinstance(snake_case__ , nn.Linear ) ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(snake_case__ ) UpperCAmelCase__ : int = inspect.signature(model.forward ) # signature.parameters is an OrderedDict => so arg_names order is deterministic UpperCAmelCase__ : str = [*signature.parameters.keys()] UpperCAmelCase__ : int = ["pixel_values"] self.assertListEqual(arg_names[:1] , snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*snake_case__ ) def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*snake_case__ ) def __a ( self : Dict ): '''simple docstring''' UpperCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ ) def __a ( self : List[Any] ): '''simple docstring''' if not self.model_tester.is_training: return UpperCAmelCase__ , UpperCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Optional[int] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if model_class in [*get_values(snake_case__ ), BeitForMaskedImageModeling]: continue UpperCAmelCase__ : Optional[Any] = model_class(snake_case__ ) model.to(snake_case__ ) model.train() UpperCAmelCase__ : Optional[int] = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Tuple = model(**snake_case__ ).loss loss.backward() def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() if not self.model_tester.is_training: return UpperCAmelCase__ : Optional[int] = False UpperCAmelCase__ : List[str] = True for model_class in self.all_model_classes: # we don't test BeitForMaskedImageModeling if ( model_class in [*get_values(snake_case__ ), 
BeitForMaskedImageModeling] or not model_class.supports_gradient_checkpointing ): continue UpperCAmelCase__ : List[Any] = model_class(snake_case__ ) model.gradient_checkpointing_enable() model.to(snake_case__ ) model.train() UpperCAmelCase__ : Dict = self._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ ) UpperCAmelCase__ : Optional[Any] = model(**snake_case__ ).loss loss.backward() def __a ( self : str ): '''simple docstring''' UpperCAmelCase__ , UpperCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common() UpperCAmelCase__ : Union[str, Any] = _config_zero_init(snake_case__ ) for model_class in self.all_model_classes: UpperCAmelCase__ : int = model_class(config=snake_case__ ) for name, param in model.named_parameters(): # we skip lambda parameters as these require special initial values # determined by config.layer_scale_init_value if "lambda" in name: continue if param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'Parameter {name} of model {model_class} seems not properly initialized' , ) @slow def __a ( self : Any ): '''simple docstring''' for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: UpperCAmelCase__ : Optional[Any] = BeitModel.from_pretrained(snake_case__ ) self.assertIsNotNone(snake_case__ ) def SCREAMING_SNAKE_CASE__ ( )-> Optional[Any]: '''simple docstring''' UpperCAmelCase__ : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ) return image @require_torch @require_vision class lowerCAmelCase__ ( unittest.TestCase ): @cached_property def __a ( self : Union[str, Any] ): '''simple docstring''' return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224" ) if is_vision_available() else None @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : Optional[Any] = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k" ).to(snake_case__ ) UpperCAmelCase__ : int = self.default_image_processor UpperCAmelCase__ : List[Any] = prepare_img() UpperCAmelCase__ : Dict = image_processor(images=snake_case__ , return_tensors="pt" ).pixel_values.to(snake_case__ ) # prepare bool_masked_pos UpperCAmelCase__ : Union[str, Any] = torch.ones((1, 1_9_6) , dtype=torch.bool ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] = model(pixel_values=snake_case__ , bool_masked_pos=snake_case__ ) UpperCAmelCase__ : str = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 1_9_6, 8_1_9_2) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Any = torch.tensor( [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , snake_case__ , atol=1e-2 ) ) @slow def __a ( self : Optional[Any] ): '''simple docstring''' UpperCAmelCase__ : Tuple = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224" ).to(snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Dict = prepare_img() UpperCAmelCase__ : Tuple = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Union[str, Any] = model(**snake_case__ ) UpperCAmelCase__ : Any = outputs.logits # verify the logits UpperCAmelCase__ : Optional[Any] = torch.Size((1, 1_0_0_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : Optional[Any] 
= torch.tensor([-1.2385, -1.0987, -1.0108] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : List[str] = 2_8_1 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Optional[int] ): '''simple docstring''' UpperCAmelCase__ : int = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k" ).to( snake_case__ ) UpperCAmelCase__ : Tuple = self.default_image_processor UpperCAmelCase__ : Any = prepare_img() UpperCAmelCase__ : Union[str, Any] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[Any] = model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits # verify the logits UpperCAmelCase__ : int = torch.Size((1, 2_1_8_4_1) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : int = torch.tensor([1.6881, -0.2787, 0.5901] ).to(snake_case__ ) self.assertTrue(torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 ) ) UpperCAmelCase__ : Any = 2_3_9_6 self.assertEqual(logits.argmax(-1 ).item() , snake_case__ ) @slow def __a ( self : Union[str, Any] ): '''simple docstring''' UpperCAmelCase__ : Dict = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : List[Any] = model.to(snake_case__ ) UpperCAmelCase__ : int = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Any = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : List[Any] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : str = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : List[str] = model(**snake_case__ ) UpperCAmelCase__ : Dict = outputs.logits # verify the logits UpperCAmelCase__ : Any = torch.Size((1, 1_5_0, 1_6_0, 1_6_0) ) self.assertEqual(logits.shape , snake_case__ ) UpperCAmelCase__ : List[str] = version.parse(PIL.__version__ ) < version.parse("9.0.0" ) if is_pillow_less_than_a: UpperCAmelCase__ : Optional[Any] = torch.tensor( [ [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]], [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]], [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]], ] , device=snake_case__ , ) else: UpperCAmelCase__ : int = torch.tensor( [ [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]], [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]], [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]], ] , device=snake_case__ , ) self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 ) ) @slow def __a ( self : Any ): '''simple docstring''' UpperCAmelCase__ : str = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" ) UpperCAmelCase__ : Any = model.to(snake_case__ ) UpperCAmelCase__ : Dict = BeitImageProcessor(do_resize=snake_case__ , size=6_4_0 , do_center_crop=snake_case__ ) UpperCAmelCase__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" ) UpperCAmelCase__ : Optional[int] = Image.open(ds[0]["file"] ) UpperCAmelCase__ : Optional[int] = image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ ) # forward pass with torch.no_grad(): UpperCAmelCase__ : Optional[int] 
= model(**snake_case__ ) UpperCAmelCase__ : int = outputs.logits.detach().cpu() UpperCAmelCase__ : str = image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(5_0_0, 3_0_0)] ) UpperCAmelCase__ : List[Any] = torch.Size((5_0_0, 3_0_0) ) self.assertEqual(segmentation[0].shape , snake_case__ ) UpperCAmelCase__ : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case__ ) UpperCAmelCase__ : int = torch.Size((1_6_0, 1_6_0) ) self.assertEqual(segmentation[0].shape , snake_case__ )
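
# Hypothetical convenience entry point (an editor addition, not part of the upstream
# test suite): it lets this module be executed directly with `python <this file>`.
# Note that the `@slow` integration tests above are skipped unless the RUN_SLOW
# environment variable is set, e.g. `RUN_SLOW=1 python -m pytest <this file> -v`.
if __name__ == "__main__":
    unittest.main()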